| /kernel/linux/linux-6.6/arch/riscv/lib/ |
| D | uaccess.S | 43 * Copy first bytes until dst is aligned to word boundary. 45 * t1 - start of aligned dst 49 /* dst is already aligned, skip */ 57 bltu a0, t1, 1b /* t1 - start of aligned dst */ 61 * Now dst is aligned. 63 * Use word-copy if both src and dst are aligned because 72 * Both src and dst are aligned, unrolled word copy 74 * a0 - start of aligned dst 75 * a1 - start of aligned src 76 * t0 - end of aligned dst [all …]
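The uaccess.S hits above outline the usual strategy: copy single bytes until the destination reaches a word boundary, switch to an (unrolled) word copy when the source turns out to be word aligned as well, and finish the tail byte by byte. A minimal C sketch of that shape, with illustrative names and no unrolling (not the kernel routine itself):

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch: align dst first, word-copy only if src shares the alignment. */
void *copy_aligned(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Head: byte copy until dst is word aligned. */
	while (n && ((uintptr_t)d & (sizeof(long) - 1))) {
		*d++ = *s++;
		n--;
	}

	/* Body: word copy, but only if src is now word aligned too. */
	if (((uintptr_t)s & (sizeof(long) - 1)) == 0) {
		while (n >= sizeof(long)) {
			*(long *)d = *(const long *)s;
			d += sizeof(long);
			s += sizeof(long);
			n -= sizeof(long);
		}
	}

	/* Tail: remaining bytes. */
	while (n--)
		*d++ = *s++;
	return dst;
}
```

The assembly additionally unrolls the word loop and falls back to a shift-and-merge path when the source cannot be word aligned; the sketch keeps only the alignment logic.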
|
| /kernel/linux/linux-6.6/tools/testing/selftests/mm/ |
| D | mremap_test.c | 41 _1KB = 1ULL << 10, /* 1KB -> not page aligned */ 253 * Check that the address is aligned to the specified alignment. in get_source_mapping() 256 * 2MB-aligned, however it will not be considered valid for a in get_source_mapping() 469 "mremap - Destination Address Misaligned (1KB-aligned)"); in main() 472 "mremap - Source Address Misaligned (1KB-aligned)"); in main() 474 /* Src addr PTE aligned */ in main() 477 "8KB mremap - Source PTE-aligned, Destination PTE-aligned"); in main() 479 /* Src addr 1MB aligned */ in main() 481 "2MB mremap - Source 1MB-aligned, Destination PTE-aligned"); in main() 483 "2MB mremap - Source 1MB-aligned, Destination 1MB-aligned"); in main() [all …]
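The selftest above exercises mremap() with variously aligned source and destination addresses. A minimal standalone sketch in the same spirit, assuming only standard mmap()/mremap() behavior (it is not the selftest; the map/probe/unmap dance is simplified and racy in a multithreaded program):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define SZ_8KB  (8UL << 10)
#define SZ_1MB  (1UL << 20)

int main(void)
{
	char *src = mmap(NULL, SZ_8KB, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;
	memset(src, 0x5a, SZ_8KB);

	/* Reserve a window big enough to contain a 1 MiB aligned block,
	 * note that aligned address, then release the window. */
	char *probe = mmap(NULL, SZ_1MB * 2, PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (probe == MAP_FAILED)
		return 1;
	char *dst = (char *)(((unsigned long)probe + SZ_1MB - 1) & ~(SZ_1MB - 1));
	munmap(probe, SZ_1MB * 2);

	/* Move the source mapping onto the aligned destination. */
	char *moved = mremap(src, SZ_8KB, SZ_8KB,
			     MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (moved == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved to %p, 1MB-aligned: %s, data intact: %s\n",
	       (void *)moved,
	       ((unsigned long)moved % SZ_1MB) ? "no" : "yes",
	       (moved[0] == 0x5a) ? "yes" : "no");
	return 0;
}
```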
|
| /kernel/linux/linux-6.6/drivers/scsi/ |
| D | ipr.h | 333 }__attribute__((packed, aligned (4))); 411 }__attribute__ ((packed, aligned (4))); 434 }__attribute__ ((packed, aligned (8))); 441 }__attribute__((packed, aligned (4))); 448 }__attribute__((packed, aligned (4))); 453 }__attribute__((packed, aligned (4))); 458 }__attribute__((packed, aligned (8))); 473 }__attribute__((packed, aligned (4))); 481 }__attribute__((packed, aligned (4))); 543 }__attribute__ ((packed, aligned(4))); [all …]
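Every hit in ipr.h is the same idiom: host/adapter structures declared packed so the compiler inserts no padding between members, yet kept on a 4- or 8-byte boundary so the whole object can be accessed with word or doubleword transfers. An illustrative struct (not an ipr definition) showing the combined effect:

```c
#include <stdint.h>
#include <assert.h>

/* Hypothetical hardware response frame: no internal padding (packed),
 * whole struct on a 4-byte boundary (aligned(4)). */
struct hw_response {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t length;
	uint32_t status;
} __attribute__((packed, aligned(4)));

static_assert(sizeof(struct hw_response) == 8, "no compiler padding");
static_assert(_Alignof(struct hw_response) == 4, "word aligned as a whole");
```

Without `aligned(4)`, `packed` alone would drop the struct's alignment to 1 and allow it to land at an odd address, which many DMA engines and firmware interfaces cannot tolerate.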
|
| /kernel/linux/linux-5.10/drivers/scsi/ |
| D | ipr.h | 336 }__attribute__((packed, aligned (4))); 414 }__attribute__ ((packed, aligned (4))); 436 }__attribute__ ((packed, aligned (8))); 443 }__attribute__((packed, aligned (4))); 450 }__attribute__((packed, aligned (4))); 455 }__attribute__((packed, aligned (4))); 460 }__attribute__((packed, aligned (8))); 475 }__attribute__((packed, aligned (4))); 483 }__attribute__((packed, aligned (4))); 546 }__attribute__ ((packed, aligned(4))); [all …]
|
| /kernel/linux/linux-5.10/arch/xtensa/include/asm/ |
| D | coprocessor.h | 118 #define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 119 #define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 122 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 124 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 129 __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN))); 131 __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN))); 133 __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN))); 135 __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN))); 137 __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN))); 139 __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN))); [all …]
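The coprocessor.h hits show save areas declared through macros that expand to byte arrays with a per-coprocessor alignment, so each area starts on the boundary the save/restore sequence expects. A hedged sketch of that macro pattern; the names and sizes below are stand-ins, not the real XCHAL_* constants:

```c
#include <assert.h>

/* Expand to a byte array of the given size and alignment. */
#define DECLARE_SAVE_AREA(name, size, align) \
	unsigned char name[size] __attribute__((aligned(align)))

struct thread_cp_state {
	DECLARE_SAVE_AREA(ncp_sa, 48, 4);   /* non-coprocessor extra state */
	DECLARE_SAVE_AREA(cp0_sa, 72, 8);   /* coprocessor 0 save area */
	DECLARE_SAVE_AREA(cp1_sa, 16, 16);  /* coprocessor 1 save area */
};

/* The struct inherits the largest member alignment. */
static_assert(_Alignof(struct thread_cp_state) >= 16, "save areas aligned");
```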
|
| /kernel/linux/linux-6.6/arch/xtensa/lib/ |
| D | memset.S | 23 * If the destination is aligned, 27 * setting 1B and 2B and then go to aligned case. 29 * case of an aligned destination (except for the branches to 47 .L0: # return here from .Ldstunaligned when dst is aligned 54 * Destination is word-aligned. 56 # set 16 bytes per iteration for word-aligned dst 106 bbci.l a5, 0, .L20 # branch if dst alignment half-aligned 107 # dst is only byte aligned 112 # now retest if dst aligned 113 bbci.l a5, 1, .L0 # if now aligned, return to main algorithm [all …]
|
| D | memcopy.S | 34 * If source is aligned, 40 * case of aligned source and destination and multiple 89 .Ldst1mod2: # dst is only byte aligned 98 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 100 .Ldst2mod4: # dst 16-bit aligned 110 j .Ldstaligned # dst is now aligned, return to main algorithm 121 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned 124 movi a8, 3 # if source is not aligned, 127 * Destination and source are word-aligned, use word copy. 129 # copy 16 bytes per iteration for word-aligned dst and word-aligned src [all …]
|
| D | usercopy.S | 30 * If the destination and source are both aligned, 33 * If destination is aligned and source unaligned, 38 * case of aligned destinations (except for the branches to 75 .Ldstaligned: # return here from .Ldstunaligned when dst is aligned 78 movi a8, 3 # if source is also aligned, 89 .Ldst1mod2: # dst is only byte aligned 98 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 100 .Ldst2mod4: # dst 16-bit aligned 110 j .Ldstaligned # dst is now aligned, return to main algorithm 138 * Destination and source are word-aligned. [all …]
|
| D | checksum.S | 44 * is aligned on either a 2-byte or 4-byte boundary. 48 bnez a5, 8f /* branch if 2-byte aligned */ 112 /* uncommon case, buf is 2-byte aligned */ 118 bnez a5, 8f /* branch if 1-byte aligned */ 124 j 1b /* now buf is 4-byte aligned */ 126 /* case: odd-byte aligned, len > 1 188 This function is optimized for 4-byte aligned addresses. Other 199 aligned case. Two bbsi.l instructions might seem more optimal 206 beqz a9, 1f /* branch if both are 4-byte aligned */ 208 j 3f /* one address is 2-byte aligned */ [all …]
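The checksum.S entry just above notes that the routine is optimized for 4-byte aligned buffers and falls back to byte/halfword handling for odd alignments. A plain-C sketch of the underlying ones-complement sum (a generic version, not the xtensa-optimized code; an aligned buffer simply lets an optimized version load 32 bits per iteration):

```c
#include <stdint.h>
#include <stddef.h>

/* Sketch: 16-bit ones-complement checksum over an arbitrary buffer. */
static uint16_t csum16(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	/* Sum the buffer as 16-bit big-endian words. */
	while (len >= 2) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)                      /* odd trailing byte */
		sum += (uint32_t)p[0] << 8;

	while (sum >> 16)             /* fold carries back into low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}
```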
|
| /kernel/linux/linux-5.10/arch/xtensa/lib/ |
| D | memset.S | 23 * If the destination is aligned, 27 * setting 1B and 2B and then go to aligned case. 29 * case of an aligned destination (except for the branches to 47 .L0: # return here from .Ldstunaligned when dst is aligned 54 * Destination is word-aligned. 56 # set 16 bytes per iteration for word-aligned dst 106 bbci.l a5, 0, .L20 # branch if dst alignment half-aligned 107 # dst is only byte aligned 112 # now retest if dst aligned 113 bbci.l a5, 1, .L0 # if now aligned, return to main algorithm [all …]
|
| D | memcopy.S | 34 * If source is aligned, 40 * case of aligned source and destination and multiple 89 .Ldst1mod2: # dst is only byte aligned 98 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 100 .Ldst2mod4: # dst 16-bit aligned 110 j .Ldstaligned # dst is now aligned, return to main algorithm 121 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned 124 movi a8, 3 # if source is not aligned, 127 * Destination and source are word-aligned, use word copy. 129 # copy 16 bytes per iteration for word-aligned dst and word-aligned src [all …]
|
| D | usercopy.S | 30 * If the destination and source are both aligned, 33 * If destination is aligned and source unaligned, 38 * case of aligned destinations (except for the branches to 70 .Ldstaligned: # return here from .Ldstunaligned when dst is aligned 73 movi a8, 3 # if source is also aligned, 84 .Ldst1mod2: # dst is only byte aligned 93 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 95 .Ldst2mod4: # dst 16-bit aligned 105 j .Ldstaligned # dst is now aligned, return to main algorithm 133 * Destination and source are word-aligned. [all …]
|
| /kernel/linux/linux-6.6/arch/xtensa/include/asm/ |
| D | coprocessor.h | 118 #define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 119 #define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 122 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 124 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 129 __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN))); 131 __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN))); 133 __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN))); 135 __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN))); 137 __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN))); 139 __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN))); [all …]
|
| /kernel/linux/linux-5.10/drivers/scsi/pm8001/ |
| D | pm8001_hwi.h | 146 } __attribute__((packed, aligned(4))); 158 } __attribute__((packed, aligned(4))); 169 } __attribute__((packed, aligned(4))); 221 } __attribute__((packed, aligned(4))); 234 } __attribute__((packed, aligned(4))); 250 } __attribute__((packed, aligned(4))); 263 } __attribute__((packed, aligned(4))); 276 } __attribute__((packed, aligned(4))); 287 } __attribute__((packed, aligned(4))); 299 } __attribute__((packed, aligned(4))); [all …]
|
| D | pm80xx_hwi.h | 345 } __attribute__((packed, aligned(4))); 357 } __attribute__((packed, aligned(4))); 367 } __attribute__((packed, aligned(4))); 418 } __attribute__((packed, aligned(4))); 432 } __attribute__((packed, aligned(4))); 441 } __attribute__((packed, aligned(4))); 456 } __attribute__((packed, aligned(4))); 468 } __attribute__((packed, aligned(4))); 479 } __attribute__((packed, aligned(4))); 489 } __attribute__((packed, aligned(4))); [all …]
|
| /kernel/linux/linux-6.6/drivers/scsi/pm8001/ |
| D | pm8001_hwi.h | 146 } __attribute__((packed, aligned(4))); 158 } __attribute__((packed, aligned(4))); 169 } __attribute__((packed, aligned(4))); 221 } __attribute__((packed, aligned(4))); 234 } __attribute__((packed, aligned(4))); 250 } __attribute__((packed, aligned(4))); 263 } __attribute__((packed, aligned(4))); 276 } __attribute__((packed, aligned(4))); 287 } __attribute__((packed, aligned(4))); 299 } __attribute__((packed, aligned(4))); [all …]
|
| D | pm80xx_hwi.h | 345 } __attribute__((packed, aligned(4))); 357 } __attribute__((packed, aligned(4))); 367 } __attribute__((packed, aligned(4))); 418 } __attribute__((packed, aligned(4))); 432 } __attribute__((packed, aligned(4))); 441 } __attribute__((packed, aligned(4))); 456 } __attribute__((packed, aligned(4))); 468 } __attribute__((packed, aligned(4))); 479 } __attribute__((packed, aligned(4))); 489 } __attribute__((packed, aligned(4))); [all …]
|
| /kernel/linux/patches/linux-4.19/prebuilts/usr/include/sound/ |
| D | compress_offload.h | 15 } __attribute__((packed, aligned(4))); 20 } __attribute__((packed, aligned(4))); 27 } __attribute__((packed, aligned(4))); 31 } __attribute__((packed, aligned(4))); 45 } __attribute__((packed, aligned(4))); 50 } __attribute__((packed, aligned(4))); 58 } __attribute__((packed, aligned(4)));
|
| /kernel/linux/linux-6.6/include/uapi/linux/ |
| D | rseq.h | 41 * struct rseq_cs is aligned on 4 * 8 bytes to ensure it is always 54 } __attribute__((aligned(4 * sizeof(__u64)))); 57 * struct rseq is aligned on 4 * 8 bytes to ensure it is always 67 * registered this data structure. Aligned on 32-bit. Always 80 * data structure. Aligned on 32-bit. Values 107 * thread which registered this data structure. Aligned on 64-bit. 138 * Aligned on 32-bit. Contains the current NUMA node ID. 146 * Aligned on 32-bit. Contains the current thread's concurrency ID 155 } __attribute__((aligned(4 * sizeof(__u64))));
|
| D | types.h | 17 typedef __signed__ __int128 __s128 __attribute__((aligned(16))); 18 typedef unsigned __int128 __u128 __attribute__((aligned(16))); 55 #define __aligned_u64 __u64 __attribute__((aligned(8))) 56 #define __aligned_s64 __s64 __attribute__((aligned(8))) 57 #define __aligned_be64 __be64 __attribute__((aligned(8))) 58 #define __aligned_le64 __le64 __attribute__((aligned(8)))
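The types.h hits define the __aligned_u64 family. The point of these macros is ABI stability: on some 32-bit ABIs (i386, for example) a plain 64-bit integer only requires 4-byte alignment, so the same UAPI struct could lay out differently for 32-bit and 64-bit callers. Forcing 8-byte alignment gives one layout everywhere. A sketch with a stand-in macro (`my_aligned_u64` is illustrative, not the UAPI name):

```c
#include <stdint.h>
#include <assert.h>

/* Stand-in for __aligned_u64: a 64-bit field forced to 8-byte alignment. */
#define my_aligned_u64 uint64_t __attribute__((aligned(8)))

struct ioctl_args {
	uint32_t flags;
	/* A plain uint64_t could start at offset 4 on i386; this one
	 * starts at offset 8 on every ABI. */
	my_aligned_u64 buffer_ptr;
	uint32_t len;
	uint32_t pad;
};

static_assert(sizeof(struct ioctl_args) == 24, "same layout on all ABIs");
```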
|
| /kernel/linux/linux-6.6/tools/testing/selftests/rseq/ |
| D | rseq-abi.h | 41 * struct rseq_abi_cs is aligned on 4 * 8 bytes to ensure it is always 54 } __attribute__((aligned(4 * sizeof(__u64)))); 57 * struct rseq_abi is aligned on 4 * 8 bytes to ensure it is always 67 * registered this data structure. Aligned on 32-bit. Always 80 * data structure. Aligned on 32-bit. Values 107 * thread which registered this data structure. Aligned on 64-bit. 154 * Aligned on 32-bit. Contains the current NUMA node ID. 162 * Aligned on 32-bit. Contains the current thread's concurrency ID 171 } __attribute__((aligned(4 * sizeof(__u64))));
|
| /kernel/linux/linux-5.10/drivers/staging/media/atomisp/pci/ |
| D | ia_css_env.h | 53 The address must be an 8 bit aligned address. */ 56 The address must be a 16 bit aligned address. */ 59 The address must be a 32 bit aligned address. */ 62 space. The address must be an 8 bit aligned address. */ 65 space. The address must be a 16 bit aligned address. */ 68 space. The address must be a 32 bit aligned address. */ 70 /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */ 72 /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
|
| /kernel/linux/linux-6.6/drivers/staging/media/atomisp/pci/ |
| D | ia_css_env.h | 54 The address must be an 8 bit aligned address. */ 57 The address must be a 16 bit aligned address. */ 60 The address must be a 32 bit aligned address. */ 63 space. The address must be an 8 bit aligned address. */ 66 space. The address must be a 16 bit aligned address. */ 69 space. The address must be a 32 bit aligned address. */ 71 /** Store a number of bytes into a byte-aligned address in the CSS HW address space. */ 73 /** Load a number of bytes from a byte-aligned address in the CSS HW address space. */
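ia_css_env.h documents a table of load/store callbacks, each with an alignment contract on the address it may be handed. A sketch of that callback-table pattern with a wrapper that enforces the 32-bit rule; the names here are assumptions, not the real ia_css API:

```c
#include <stdint.h>
#include <assert.h>

/* Driver-supplied hooks for accessing the hardware address space. */
struct hw_access_env {
	void (*store_8)(uint32_t addr, uint8_t data);    /* any address */
	void (*store_16)(uint32_t addr, uint16_t data);  /* 2-byte aligned */
	void (*store_32)(uint32_t addr, uint32_t data);  /* 4-byte aligned */
};

static void store_32_checked(const struct hw_access_env *env,
			     uint32_t addr, uint32_t data)
{
	assert((addr & 0x3) == 0);   /* enforce the 32-bit alignment rule */
	env->store_32(addr, data);
}
```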
|
| /kernel/linux/linux-6.6/arch/mips/kernel/ |
| D | cmpxchg.c | 16 /* Check that ptr is naturally aligned */ in __xchg_small() 25 * exchange within the naturally aligned 4 byte integer that includes in __xchg_small() 35 * Calculate a pointer to the naturally aligned 4 byte integer that in __xchg_small() 57 /* Check that ptr is naturally aligned */ in __cmpxchg_small() 67 * compare & exchange within the naturally aligned 4 byte integer in __cmpxchg_small() 77 * Calculate a pointer to the naturally aligned 4 byte integer that in __cmpxchg_small() 93 * Calculate the old & new values of the naturally aligned in __cmpxchg_small()
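cmpxchg.c emulates 1- and 2-byte exchange and compare-exchange by operating on the naturally aligned 32-bit word that contains the small value, shifting and masking the byte in and out. A simplified sketch of the same technique using C11 atomics instead of the kernel's LL/SC primitives (in real code the containing word would be accessed with architecture-specific atomics; the byte-shift below assumes little endian):

```c
#include <stdint.h>
#include <stdatomic.h>

/* Sketch: 1-byte compare-and-exchange built from 4-byte atomics. */
static uint8_t cmpxchg_u8(uint8_t *ptr, uint8_t oldv, uint8_t newv)
{
	/* The naturally aligned 4-byte word that contains *ptr. */
	_Atomic uint32_t *word =
		(_Atomic uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	/* Position of the target byte inside that word. */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	uint32_t mask = 0xffu << shift;

	uint32_t cur = atomic_load(word);
	for (;;) {
		uint8_t seen = (uint8_t)((cur & mask) >> shift);
		if (seen != oldv)
			return seen;              /* comparison failed */
		/* Splice the new byte into the surrounding word. */
		uint32_t next = (cur & ~mask) | ((uint32_t)newv << shift);
		if (atomic_compare_exchange_weak(word, &cur, next))
			return oldv;              /* byte was swapped */
		/* cur was refreshed by the failed CAS; retry. */
	}
}
```

The 2-byte variant differs only in the mask width and in requiring the halfword not to straddle the word boundary, which natural alignment guarantees.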
|
| /kernel/linux/linux-5.10/arch/mips/kernel/ |
| D | cmpxchg.c | 16 /* Check that ptr is naturally aligned */ in __xchg_small() 25 * exchange within the naturally aligned 4 byte integer that includes in __xchg_small() 35 * Calculate a pointer to the naturally aligned 4 byte integer that in __xchg_small() 57 /* Check that ptr is naturally aligned */ in __cmpxchg_small() 67 * compare & exchange within the naturally aligned 4 byte integer in __cmpxchg_small() 77 * Calculate a pointer to the naturally aligned 4 byte integer that in __cmpxchg_small() 93 * Calculate the old & new values of the naturally aligned in __cmpxchg_small()
|