/arch/arm64/crypto/ |
D | aes-glue.c |
      59  int rounds, int blocks, int first);
      61  int rounds, int blocks, int first);
      64  int rounds, int blocks, u8 iv[], int first);
      66  int rounds, int blocks, u8 iv[], int first);
      69  int rounds, int blocks, u8 ctr[], int first);
      72  int rounds, int blocks, u8 const rk2[], u8 iv[],
      75  int rounds, int blocks, u8 const rk2[], u8 iv[],
     110  unsigned int blocks;  in ecb_encrypt() local
     117  for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {  in ecb_encrypt()
     119  (u8 *)ctx->key_enc, rounds, blocks, first);  in ecb_encrypt()
[all …]
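The ecb_encrypt() hits above show the usual glue-layer shape: each walk step is converted to a whole-block count (walk.nbytes / AES_BLOCK_SIZE) for the assembly core, and a first flag makes the core load the round keys only once. A minimal userspace sketch of that block-counting loop, assuming a stand-in XOR "cipher" (fake_ecb_encrypt and the buffer sizes are illustrative, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    /* Stand-in for the assembly core: "encrypts" whole blocks only.
     * 'first' mimics the kernel flag that makes the core load the
     * round keys on the initial call of a request. */
    static void fake_ecb_encrypt(uint8_t *out, const uint8_t *in,
                                 const uint8_t *key, int blocks, int first)
    {
        if (first)
            printf("loading round keys\n");
        for (int i = 0; i < blocks * AES_BLOCK_SIZE; i++)
            out[i] = in[i] ^ key[i % AES_BLOCK_SIZE];
    }

    int main(void)
    {
        uint8_t key[AES_BLOCK_SIZE] = { 0xaa }, buf[64], dst[64];
        unsigned int nbytes = sizeof(buf), blocks;
        int first;

        memset(buf, 0x5a, sizeof(buf));
        /* Same shape as the ecb_encrypt() loop matched above: convert
         * bytes to whole blocks, drop 'first' after the initial chunk. */
        for (first = 1; (blocks = nbytes / AES_BLOCK_SIZE); first = 0) {
            fake_ecb_encrypt(dst, buf, key, blocks, first);
            nbytes -= blocks * AES_BLOCK_SIZE;  /* tail is < one block */
        }
        printf("done, %u tail bytes\n", nbytes);
        return 0;
    }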
|
D | ghash-ce-glue.c |
      36  asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
      57  int blocks;  in ghash_update() local
      67  blocks = len / GHASH_BLOCK_SIZE;  in ghash_update()
      71  pmull_ghash_update(blocks, ctx->digest, src, key,  in ghash_update()
      74  src += blocks * GHASH_BLOCK_SIZE;  in ghash_update()
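ghash_update() hands only whole 16-byte blocks to the PMULL helper and buffers the remainder; the matched lines (divide, call, advance) are the entire pattern. A sketch of that split, with fold_blocks() as an illustrative stand-in for the real GF(2^128) multiply inside pmull_ghash_update():

    #include <stdint.h>
    #include <stdio.h>

    #define GHASH_BLOCK_SIZE 16

    /* Stand-in for pmull_ghash_update(): fold 'blocks' 16-byte blocks
     * into the running digest (the real code multiplies in GF(2^128)). */
    static void fold_blocks(int blocks, uint64_t dg[2], const uint8_t *src)
    {
        for (int b = 0; b < blocks; b++)
            for (int i = 0; i < 8; i++) {
                dg[0] ^= (uint64_t)src[b * 16 + i] << (8 * i);
                dg[1] ^= (uint64_t)src[b * 16 + 8 + i] << (8 * i);
            }
    }

    int main(void)
    {
        uint8_t msg[40] = { 1, 2, 3 };   /* 2 full blocks + 8 leftover bytes */
        uint64_t dg[2] = { 0, 0 };
        const uint8_t *src = msg;
        unsigned int len = sizeof(msg);

        /* Same arithmetic as the ghash_update() matches above: whole
         * blocks go to the accelerated helper, the tail is buffered. */
        int blocks = len / GHASH_BLOCK_SIZE;
        fold_blocks(blocks, dg, src);
        src += blocks * GHASH_BLOCK_SIZE;
        len %= GHASH_BLOCK_SIZE;

        printf("folded %d blocks, %u bytes left for the partial buffer\n",
               blocks, len);
        return 0;
    }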
|
D | sha1-ce-glue.c |
      30  int blocks);
      33  u8 const *src, int blocks)  in __cfi_sha1_ce_transform() argument
      35  sha1_ce_transform((struct sha1_ce_state *)sst, src, blocks);  in __cfi_sha1_ce_transform()
|
D | sha2-ce-glue.c |
      30  int blocks);
      33  u8 const *src, int blocks)  in __cfi_sha2_ce_transform() argument
      35  sha2_ce_transform((struct sha256_ce_state *)sst, src, blocks);  in __cfi_sha2_ce_transform()
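The __cfi_sha1_ce_transform()/__cfi_sha2_ce_transform() shims matched above exist so that an indirect call through the generic hash helpers lands on a real C function with the expected prototype; the shim only casts the generic state down to the CE-specific wrapper. A sketch of that adapter pattern (all type and function names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Generic state as seen by shared helper code. */
    struct sha_state { uint32_t h[8]; };

    /* Implementation-specific state that embeds the generic one
     * (mirrors how sha256_ce_state adds a finalize flag). */
    struct sha_ce_state { struct sha_state sst; uint32_t finalize; };

    /* "Assembly" core with the implementation-specific prototype. */
    static void sha_ce_transform(struct sha_ce_state *sst,
                                 const uint8_t *src, int blocks)
    {
        (void)src;
        printf("processing %d block(s), finalize=%u\n", blocks, sst->finalize);
    }

    /* Shim: a real C function with the generic prototype, so indirect
     * calls type-check; it just casts down to the embedding struct. */
    static void cfi_sha_ce_transform(struct sha_state *sst,
                                     const uint8_t *src, int blocks)
    {
        sha_ce_transform((struct sha_ce_state *)sst, src, blocks);
    }

    int main(void)
    {
        struct sha_ce_state st = { .finalize = 1 };
        void (*block_fn)(struct sha_state *, const uint8_t *, int) =
            cfi_sha_ce_transform;   /* indirect call site, as in the helpers */
        uint8_t block[64] = { 0 };

        block_fn(&st.sst, block, 1);
        return 0;
    }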
|
/arch/arm/crypto/ |
D | aes-ce-glue.c |
      29  int rounds, int blocks);
      31  int rounds, int blocks);
      34  int rounds, int blocks, u8 iv[]);
      36  int rounds, int blocks, u8 iv[]);
      39  int rounds, int blocks, u8 ctr[]);
      42  int rounds, int blocks, u8 iv[],
      45  int rounds, int blocks, u8 iv[],
     181  unsigned int blocks;  in ecb_encrypt() local
     189  while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {  in ecb_encrypt()
     191  (u8 *)ctx->key_enc, num_rounds(ctx), blocks);  in ecb_encrypt()
[all …]
|
D | aesbs-glue.c |
      34  asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
     122  u32 blocks = walk.nbytes / AES_BLOCK_SIZE;  in aesbs_cbc_encrypt() local
     133  } while (--blocks);  in aesbs_cbc_encrypt()
     144  } while (--blocks);  in aesbs_cbc_encrypt()
     170  u32 blocks = walk.nbytes / AES_BLOCK_SIZE;  in aesbs_cbc_decrypt() local
     178  memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);  in aesbs_cbc_decrypt()
     184  iv = bk[blocks & 1];  in aesbs_cbc_decrypt()
     190  } while (--blocks);  in aesbs_cbc_decrypt()
     215  u32 blocks;  in aesbs_ctr_encrypt() local
     221  while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {  in aesbs_ctr_encrypt()
[all …]
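The bk[blocks & 1] lines in aesbs_cbc_decrypt() are a ping-pong buffer: CBC decryption needs the previous ciphertext block as IV, so when decrypting in place each block is copied aside before being overwritten, alternating between two scratch buffers by block parity. A sketch, with fake_decrypt() standing in for the bit-sliced AES core:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLK 16

    /* Stand-in block decryption (the real code calls bit-sliced AES). */
    static void fake_decrypt(uint8_t *dst, const uint8_t *src)
    {
        for (int i = 0; i < BLK; i++)
            dst[i] = src[i] ^ 0xff;
    }

    /* In-place CBC decrypt using two alternating bounce buffers, the
     * bk[blocks & 1] idea matched above: each ciphertext block is
     * saved before it is overwritten, because it is the IV of the
     * following block. Parity alternation means the save never
     * clobbers the buffer currently serving as IV. */
    static void cbc_decrypt_inplace(uint8_t *buf, uint32_t blocks,
                                    const uint8_t *iv0)
    {
        uint8_t bk[2][BLK];
        const uint8_t *iv = iv0;
        uint8_t *p = buf;

        do {
            memcpy(bk[blocks & 1], p, BLK);  /* save ciphertext: next IV */
            fake_decrypt(p, p);
            for (int i = 0; i < BLK; i++)
                p[i] ^= iv[i];
            iv = bk[blocks & 1];             /* copy survives one round */
            p += BLK;
        } while (--blocks);
    }

    int main(void)
    {
        uint8_t buf[3 * BLK], iv[BLK] = { 0 };
        memset(buf, 0xa5, sizeof(buf));
        cbc_decrypt_inplace(buf, 3, iv);
        printf("first output byte: %02x\n", buf[0]);
        return 0;
    }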
|
D | ghash-ce-glue.c |
      43  asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
      64  int blocks;  in ghash_update() local
      74  blocks = len / GHASH_BLOCK_SIZE;  in ghash_update()
      78  pmull_ghash_update(blocks, ctx->digest, src, key,  in ghash_update()
      81  src += blocks * GHASH_BLOCK_SIZE;  in ghash_update()
|
D | sha1-ce-glue.c | 28 int blocks);
|
D | sha512-neon-glue.c | 26 int blocks);
|
D | sha2-ce-glue.c | 29 int blocks);
|
D | sha512-glue.c | 31 asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
|
D | aes-ce-core.S |
     321  bmi .Lctrhalfblock @ blocks < 0 means 1/2 block
     408  vld1.8 {q0-q1}, [r1, :64]! @ get 3 pt blocks
     419  vst1.8 {q0-q1}, [r0, :64]! @ write 3 ct blocks
     459  vld1.8 {q0-q1}, [r1, :64]! @ get 3 ct blocks
     470  vst1.8 {q0-q1}, [r0, :64]! @ write 3 pt blocks
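The .Lctrhalfblock branch documents an in-band convention: a negative block count tells the CTR core to emit only a partial final block (bmi branches on the sign flag set by the count). A trivial C rendering of that signaling (ctr_process() is illustrative, not the kernel interface):

    #include <stdio.h>

    /* "blocks < 0 means 1/2 block": negative count is in-band
     * signaling that only a partial final block is wanted. */
    static void ctr_process(int blocks)
    {
        if (blocks < 0) {             /* bmi .Lctrhalfblock */
            printf("emit partial final block\n");
            return;
        }
        while (blocks--)
            printf("emit full block\n");
    }

    int main(void)
    {
        ctr_process(2);
        ctr_process(-1);
        return 0;
    }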
|
/arch/m68k/emu/ |
D | nfblock.c |
      41  static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,  in nfhd_get_capacity() argument
      45  virt_to_phys(blocks), virt_to_phys(blocksize));  in nfhd_get_capacity()
      56  u32 blocks, bsize;  member
      87  geo->cylinders = dev->blocks >> (6 - dev->bshift);  in nfhd_getgeo()
      99  static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)  in nfhd_init_one() argument
     105  blocks, bsize);  in nfhd_init_one()
     117  dev->blocks = blocks;  in nfhd_init_one()
     138  set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));  in nfhd_init_one()
     157  u32 blocks, bsize;  in nfhd_init() local
     171  if (nfhd_get_capacity(i, 0, &blocks, &bsize))  in nfhd_init()
[all …]
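nfhd_init_one() converts the emulated device's native size (blocks of bsize bytes) to the block layer's 512-byte sectors; the (sector_t) cast in the set_capacity() line keeps the multiply from overflowing 32 bits. The same arithmetic, with made-up example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t blocks = 204800;   /* example values, not from the driver */
        uint32_t bsize  = 1024;

        /* Widen before multiplying, as the (sector_t) cast does. */
        uint64_t sectors = (uint64_t)blocks * (bsize / 512);
        printf("%u blocks of %u bytes = %llu sectors (%llu MiB)\n",
               blocks, bsize, (unsigned long long)sectors,
               (unsigned long long)(sectors / 2048));
        return 0;
    }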
|
/arch/x86/crypto/ |
D | poly1305_glue.c |
      35  const u32 *r, unsigned int blocks);
      37  unsigned int blocks, const u32 *u);
      40  unsigned int blocks, const u32 *u);
      71  unsigned int blocks, datalen;  in poly1305_simd_blocks() local
      96  blocks = srclen / (POLY1305_BLOCK_SIZE * 4);  in poly1305_simd_blocks()
      97  poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);  in poly1305_simd_blocks()
      98  src += POLY1305_BLOCK_SIZE * 4 * blocks;  in poly1305_simd_blocks()
      99  srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;  in poly1305_simd_blocks()
     108  blocks = srclen / (POLY1305_BLOCK_SIZE * 2);  in poly1305_simd_blocks()
     109  poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);  in poly1305_simd_blocks()
[all …]
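poly1305_simd_blocks() dispatches in tiers: as many 4-block chunks as possible go to the AVX2 routine, then 2-block chunks to SSE2, and the tail falls through to the scalar code. A sketch of that chunking arithmetic (consume() and the implementation names are placeholders):

    #include <stdio.h>

    #define POLY1305_BLOCK_SIZE 16

    /* Report how many whole 'unit'-sized chunks fit in srclen and
     * return the number of bytes they cover. */
    static unsigned int consume(const char *impl, unsigned int srclen,
                                unsigned int unit)
    {
        unsigned int blocks = srclen / unit;
        if (blocks)
            printf("%s handles %u x %u-byte chunks\n", impl, blocks, unit);
        return blocks * unit;
    }

    int main(void)
    {
        unsigned int srclen = 23 * POLY1305_BLOCK_SIZE + 7;

        /* Widest unit first, as in the matches above. */
        srclen -= consume("avx2-4block", srclen, POLY1305_BLOCK_SIZE * 4);
        srclen -= consume("sse2-2block", srclen, POLY1305_BLOCK_SIZE * 2);
        printf("%u bytes left for the scalar fallback\n", srclen);
        return 0;
    }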
|
/arch/arm/mach-ixp4xx/ |
D | ixp4xx_npe.c |
     523  struct dl_block blocks[0];  in npe_load_firmware() member
     533  int i, j, err, data_size, instr_size, blocks, table_end;  in npe_load_firmware() local
     604  for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;  in npe_load_firmware()
     605  blocks++)  in npe_load_firmware()
     606  if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)  in npe_load_firmware()
     608  if (blocks * sizeof(struct dl_block) / 4 >= image->size) {  in npe_load_firmware()
     615  print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);  in npe_load_firmware()
     618  table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;  in npe_load_firmware()
     619  for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {  in npe_load_firmware()
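The npe_load_firmware() loop scans the firmware's block-descriptor table for an EOF-typed entry while refusing to read past image->size (which the driver counts in 32-bit words, hence the sizeof()/4 arithmetic). A standalone sketch of that bounded scan, assuming a made-up FW_BLOCK_TYPE_EOF value and a simplified descriptor:

    #include <stdint.h>
    #include <stdio.h>

    #define FW_BLOCK_TYPE_EOF 0x0f   /* illustrative value only */

    struct dl_block { uint32_t type; uint32_t offset; };

    int main(void)
    {
        struct dl_block table[] = {
            { 1, 0x100 }, { 2, 0x200 }, { FW_BLOCK_TYPE_EOF, 0 },
        };
        unsigned int nwords = sizeof(table) / 4;   /* image size in words */
        unsigned int blocks;

        /* Walk the table until an EOF entry, never past the image end. */
        for (blocks = 0;
             blocks * sizeof(struct dl_block) / 4 < nwords; blocks++)
            if (table[blocks].type == FW_BLOCK_TYPE_EOF)
                break;

        if (blocks * sizeof(struct dl_block) / 4 >= nwords)
            printf("no EOF block found: corrupt image\n");
        else
            printf("%u firmware blocks before EOF\n", blocks);
        return 0;
    }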
|
/arch/powerpc/kernel/ |
D | rtas_flash.c |
     114  struct flash_block blocks[FLASH_BLOCKS_PER_NODE];  member
     177  if (f->blocks[i].data == NULL) {  in flash_list_valid()
     180  block_size = f->blocks[i].length;  in flash_list_valid()
     205  kmem_cache_free(flash_block_cache, f->blocks[i].data);  in free_flash_list()
     361  fl->blocks[next_free].data = p;  in rtas_flash_write()
     362  fl->blocks[next_free].length = count;  in rtas_flash_write()
     614  f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));  in rtas_flash_firmware()
     615  image_size += f->blocks[i].length;  in rtas_flash_firmware()
     616  f->blocks[i].length = cpu_to_be64(f->blocks[i].length);  in rtas_flash_firmware()
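Before rtas_flash_firmware() hands the block list to firmware, each entry is rewritten in place: the virtual data pointer becomes a big-endian physical address and the length is byte-swapped, after which the kernel must not dereference the list again. A sketch of that in-place conversion, with fake_virt_to_phys()/fake_cpu_to_be64() as illustrative stand-ins for __pa() and cpu_to_be64():

    #include <stdint.h>
    #include <stdio.h>

    struct flash_block { char *data; uint64_t length; };

    /* Stand-ins: identity "translation" and a GCC/Clang byte swap. */
    static uint64_t fake_virt_to_phys(void *p) { return (uintptr_t)p; }
    static uint64_t fake_cpu_to_be64(uint64_t v) { return __builtin_bswap64(v); }

    int main(void)
    {
        static char payload[64];
        struct flash_block blk = { payload, sizeof(payload) };

        /* Same in-place rewrite as the matches above: once converted,
         * the descriptor is only meaningful to the firmware. */
        blk.data   = (char *)(uintptr_t)fake_cpu_to_be64(fake_virt_to_phys(payload));
        blk.length = fake_cpu_to_be64(blk.length);

        printf("handed descriptor { %p, %#llx } to firmware\n",
               (void *)blk.data, (unsigned long long)blk.length);
        return 0;
    }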
|
/arch/cris/boot/rescue/ |
D | head_v32.S | 19 ;; Start clocks for used blocks.
|
/arch/x86/kernel/cpu/mcheck/ |
D | mce_amd.c |
     891  if (per_cpu(threshold_banks, cpu)[bank]->blocks) {  in allocate_threshold_blocks()
     893  &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);  in allocate_threshold_blocks()
     895  per_cpu(threshold_banks, cpu)[bank]->blocks = b;  in allocate_threshold_blocks()
     928  struct list_head *head = &b->blocks->miscj;  in __threshold_add_blocks()
     933  err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);  in __threshold_add_blocks()
    1051  list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {  in deallocate_threshold_block()
    1057  kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);  in deallocate_threshold_block()
    1058  per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;  in deallocate_threshold_block()
    1068  list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)  in __threshold_remove_blocks()
    1081  if (!b->blocks)  in threshold_remove_bank()
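Each threshold bank owns a list of threshold_block entries chained through miscj; teardown walks it with list_for_each_entry_safe() (so a node can be freed mid-walk) and finally clears the bank's blocks pointer. A simplified userspace rendering using a plain singly linked list in place of the kernel's intrusive list_head:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified shape: a bank owns a list of blocks (the kernel
     * chains threshold_block entries through a list_head named miscj). */
    struct block { int addr; struct block *next; };
    struct bank  { struct block *blocks; };

    static void deallocate_blocks(struct bank *b)
    {
        /* "Safe" walk as in deallocate_threshold_block(): grab the
         * next pointer before freeing the current node. */
        struct block *pos = b->blocks, *tmp;
        while (pos) {
            tmp = pos->next;
            free(pos);
            pos = tmp;
        }
        b->blocks = NULL;   /* matches ...->blocks = NULL above */
    }

    int main(void)
    {
        struct bank b = { NULL };
        for (int i = 0; i < 3; i++) {   /* cf. allocate_threshold_blocks() */
            struct block *n = malloc(sizeof(*n));
            n->addr = i;
            n->next = b.blocks;
            b.blocks = n;
        }
        deallocate_blocks(&b);
        printf("bank empty: %s\n", b.blocks ? "no" : "yes");
        return 0;
    }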
|
/arch/arm/boot/dts/ |
D | sama5d3_tcb1.dtsi | 3 * 2 TC blocks.
|
/arch/metag/mm/ |
D | Kconfig |
      47  these blocks by binding them to nodes and allowing
      57  blocks into "zones", where each zone is a power of two number of
      60  blocks of physically contiguous memory, then you may need to
|
/arch/cris/boot/compressed/ |
D | head_v32.S | 23 ;; Start clocks for used blocks.
|
/arch/mips/cavium-octeon/ |
D | Kconfig |
      20  This selects the size of CVMSEG LM, which is in cache blocks. The
      21  legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is
|
/arch/x86/include/asm/ |
D | amd_nb.h | 51 struct threshold_block *blocks; member
|
/arch/metag/lib/ |
D | memcpy.S |
      49  ! blocks, then jump to the unaligned copy loop or fall through to the aligned
      53  LSR D1Ar5, D1Ar3, #3 ! D1Ar5 = number of 8 byte blocks
      59  LSRS D1Ar5, D1Ar3, #5 ! D1Ar5 = number of 32 byte blocks
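The two LSR lines compute block counts by shifting the byte length: len >> 3 gives the number of 8-byte blocks, len >> 5 the number of 32-byte blocks, and the masked-off low bits are the tail handled separately. The same arithmetic in C:

    #include <stdio.h>

    int main(void)
    {
        unsigned long len = 203;   /* example length */

        unsigned long blocks8  = len >> 3;   /* LSR D1Ar5, D1Ar3, #3 */
        unsigned long blocks32 = len >> 5;   /* LSRS D1Ar5, D1Ar3, #5 */

        printf("%lu bytes = %lu x 8-byte blocks + %lu tail bytes\n",
               len, blocks8, len & 7);
        printf("%lu bytes = %lu x 32-byte blocks + %lu tail bytes\n",
               len, blocks32, len & 31);
        return 0;
    }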
|
/arch/c6x/ |
D | Kconfig |
      94  blocks into "zones", where each zone is a power of two number of
      97  blocks of physically contiguous memory, then you may need to
|