| /kernel/linux/linux-6.6/rust/kernel/sync/ |
| D | lock.rs |
|   3 | //! Generic kernel lock and guard.
|   5 | //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
| 118 | pub fn lock(&self) -> Guard<'_, T, B> {   (in lock())
| 123 | unsafe { Guard::new(self, state) }   (in lock())
| 127 | /// A lock guard.
| 130 | /// when a guard goes out of scope. It also provides a safe and convenient way to access the data
| 132 | #[must_use = "the lock unlocks immediately when the guard is unused"]
| 133 | pub struct Guard<'a, T: ?Sized, B: Backend> {   (struct)
| 139 | // SAFETY: `Guard` is sync when the data protected by the lock is also sync.   (argument)
| 140 | unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}   (implementation)
[all …]
|
| D | condvar.rs |
|   8 | use super::{lock::Backend, lock::Guard, LockClassKey};
|  51 | /// let mut guard = e.value.lock();
|  52 | /// while *guard != v {
|  53 | /// e.value_changed.wait_uninterruptible(&mut guard);
| 106 | fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {   (in wait_internal())
| 118 | guard.do_unlocked(|| unsafe { bindings::schedule() });   (in wait_internal())
| 126 | /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
| 133 | pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {   (in wait())
| 134 | self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);   (in wait())
| 142 | pub fn wait_uninterruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {   (in wait_uninterruptible())
[all …]
|
| D | locked_by.rs |
|  53 | /// let guard = dir.inner.lock();
|  54 | /// let inner_file = file.inner.access(&guard);
|  55 | /// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used);
|  60 | /// let mut guard = dir.inner.lock();
|  61 | /// guard.bytes_used += 10;
|  63 | /// let file_inner = file.inner.access_mut(&mut guard);
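The three Rust files above share one idea: lock() returns a Guard that releases the lock when it goes out of scope, and LockedBy::access()/access_mut() take such a guard as proof that the lock is held. For comparison, here is a minimal sketch of the manual C pattern the guard automates; the struct and function below are invented for illustration, not taken from these files.

```c
#include <linux/mutex.h>
#include <linux/types.h>

/* Illustrative only: data plus the mutex that guards it. */
struct dir_stats {
	struct mutex lock;	/* protects bytes_used */
	u64 bytes_used;
};

static void dir_stats_add(struct dir_stats *d, u64 n)
{
	mutex_lock(&d->lock);	/* Rust: let mut guard = d.lock();          */
	d->bytes_used += n;	/* Rust: guard.bytes_used += n;             */
	mutex_unlock(&d->lock);	/* Rust: implicit when the guard is dropped */
}
```

In the Rust version the unlock cannot be forgotten, and the #[must_use] attribute on Guard (line 132 above) warns when a lock is taken and the guard is immediately discarded.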
|
| /kernel/linux/linux-6.6/drivers/char/tpm/ |
| D | tpm_tis_i2c.c |
|  33 | /* Default Guard Time of 250µs until interface capability register is read */
|  37 | /* Guard Time of 250µs after I2C slave NACK */
|  41 | /* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
| 277 |  * Guard Time:
| 282 |  * Before the Guard Time is read (or after the TPM failed to send an I2C NACK),
| 283 |  * a Guard Time of 250µs applies.
| 285 |  * Various flags in the same register indicate if a guard time is needed:
| 286 |  * - SR: <I2C read with repeated start> <guard time> <I2C read>
| 287 |  * - RR: <I2C read> <guard time> <I2C read>
| 288 |  * - RW: <I2C read> <guard time> <I2C write>
[all …]
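The comment block above spells out the TIS I2C guard time: roughly 250µs must pass after certain bus operations (and after a slave NACK) before the next transfer, with the SR/RR/RW flags selecting which operation pairs need it. Below is a hedged sketch of that pacing; the helper name is hypothetical, and the real driver reads the actual guard time from the interface capability register instead of always using the 250µs default.

```c
#include <linux/delay.h>
#include <linux/i2c.h>

#define GUARD_TIME_DEFAULT_US	250	/* default until the capability register is read */

/* Hypothetical helper: perform an I2C read, then honour the guard time so
 * the next bus operation (e.g. the write in <read> <guard time> <write>)
 * does not start too early for the TPM. */
static int tpm_i2c_read_guarded(struct i2c_client *client, char *buf, int len)
{
	int rc = i2c_master_recv(client, buf, len);

	/* Sleep slightly past the minimum guard time. */
	usleep_range(GUARD_TIME_DEFAULT_US, GUARD_TIME_DEFAULT_US + 50);
	return rc < 0 ? rc : 0;
}
```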
|
| /kernel/linux/linux-5.10/include/rdma/ |
| D | signature.h |
|  33 |  * enum ib_t10_dif_bg_type - Signature T10-DIF block-guard types
|  45 |  * @bg_type: T10-DIF block guard type (CRC|CSUM)
|  47 |  * @bg: seed of guard computation.
|  48 |  * @app_tag: application tag of guard block
|  49 |  * @ref_tag: initial guard block reference tag.
| 103 |  * | GUARD | APPTAG | REFTAG |
|
| /kernel/linux/linux-6.6/include/rdma/ |
| D | signature.h |
|  33 |  * enum ib_t10_dif_bg_type - Signature T10-DIF block-guard types
|  45 |  * @bg_type: T10-DIF block guard type (CRC|CSUM)
|  47 |  * @bg: seed of guard computation.
|  48 |  * @app_tag: application tag of guard block
|  49 |  * @ref_tag: initial guard block reference tag.
| 103 |  * | GUARD | APPTAG | REFTAG |
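The kernel-doc lines above describe a DIF signature domain as a block-guard type plus seed values for the guard, application and reference tags. The sketch below simply restates those documented fields as a struct; it is illustrative and deliberately not the verbatim kernel definition, which lives in include/rdma/signature.h.

```c
#include <linux/types.h>

/* Illustrative DIF domain descriptor, mirroring the fields documented above. */
struct dif_domain_sketch {
	int bg_type;	/* block guard type: CRC or CSUM                  */
	u16 bg;		/* seed of the guard computation                  */
	u16 app_tag;	/* application tag of the guard block             */
	u32 ref_tag;	/* initial reference tag, typically the start LBA */
};
```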
|
| /kernel/linux/linux-6.6/Documentation/mm/ |
| D | vmalloced-kernel-stacks.rst |
|  25 | Virtually-mapped kernel stacks with guard pages causes kernel stack
|  30 | support for virtually mapped stacks with guard pages. This feature
|  53 | - If the stack overflows into a guard page, something reasonable
|  64 | with guard pages. This causes kernel stack overflows to be caught
| 123 | Leading and trailing guard pages help detect stack overflows. When stack
| 124 | overflows into the guard pages, handlers have to be careful not overflow
| 131 | Testing VMAP allocation with guard pages
| 135 | and trailing guard page? The following lkdtm tests can help detect any
|
| /kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/ |
| D | rxe_verbs.h |
|  83 | spinlock_t sq_lock; /* guard queue */
|  90 | spinlock_t producer_lock; /* guard queue producer */
|  91 | spinlock_t consumer_lock; /* guard queue consumer */
| 235 | spinlock_t grp_lock; /* guard grp_list */
| 260 | spinlock_t state_lock; /* guard requester and completer */
| 324 | spinlock_t mcg_lock; /* guard group */
| 345 | spinlock_t port_lock; /* guard port */
| 374 | spinlock_t pending_lock; /* guard pending_mmaps */
| 377 | spinlock_t mmap_offset_lock; /* guard mmap_offset */
|
| /kernel/linux/linux-5.10/arch/parisc/math-emu/ |
| D | cnv_float.h |
|  88 | #define Dbl_to_sgl_mantissa(srcA,srcB,dest,inexact,guard,sticky,odd) \   (argument)
|  90 | guard = Dbit3p2(srcB); \
|  92 | inexact = guard | sticky; \
|  95 | #define Dbl_to_sgl_denormalized(srcA,srcB,exp,dest,inexact,guard,sticky,odd,tiny) \   (argument)
| 101 | guard = inexact >> 31; \
| 124 | if (guard && (sticky || odd)) { \
| 134 | guard = odd; \
| 136 | inexact |= guard; \
| 144 | guard = inexact >> 31; \
| 157 | guard = inexact >> 31; \
[all …]
|
| /kernel/linux/linux-6.6/arch/parisc/math-emu/ |
| D | cnv_float.h |
|  88 | #define Dbl_to_sgl_mantissa(srcA,srcB,dest,inexact,guard,sticky,odd) \   (argument)
|  90 | guard = Dbit3p2(srcB); \
|  92 | inexact = guard | sticky; \
|  95 | #define Dbl_to_sgl_denormalized(srcA,srcB,exp,dest,inexact,guard,sticky,odd,tiny) \   (argument)
| 101 | guard = inexact >> 31; \
| 124 | if (guard && (sticky || odd)) { \
| 134 | guard = odd; \
| 136 | inexact |= guard; \
| 144 | guard = inexact >> 31; \
| 157 | guard = inexact >> 31; \
[all …]
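These macros implement round-to-nearest-even while narrowing a double mantissa: guard is the first bit shifted out, sticky is the OR of all lower bits, odd is the low bit of the kept result, and the value is bumped only when guard && (sticky || odd) (line 124 above). A self-contained C illustration of that rule, with made-up names rather than the macro arguments above:

```c
#include <stdint.h>
#include <stdio.h>

/* Shift `mant` right by `shift` (>= 1) bits, rounding to nearest-even
 * with guard/sticky bits, as in the Dbl_to_sgl_* macros above. */
static uint64_t round_nearest_even(uint64_t mant, unsigned int shift)
{
	uint64_t result = mant >> shift;
	unsigned int guard  = (mant >> (shift - 1)) & 1;		 /* first dropped bit */
	unsigned int sticky = (mant & ((1ULL << (shift - 1)) - 1)) != 0; /* any lower bit set */
	unsigned int odd    = result & 1;				 /* low bit of result */

	if (guard && (sticky || odd))	/* more than halfway, or a tie that would stay odd */
		result += 1;
	return result;
}

int main(void)
{
	/* 11 >> 1 is 5.5: guard=1, sticky=0, odd=1, so the tie rounds up to 6. */
	printf("%llu\n", (unsigned long long)round_nearest_even(11, 1));
	return 0;
}
```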
|
| /kernel/linux/linux-5.10/scripts/atomic/ |
| D | gen-atomic-instrumented.sh |
|  85 | local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
|  93 | [ ! -z "${guard}" ] && printf "#if ${guard}\n"
| 105 | [ ! -z "${guard}" ] && printf "#endif\n"
| 129 | local guard="defined(arch_${name}${sfx})"
| 131 | [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
| 133 | printf "#if ${guard}\n"
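The script builds the guard string with gen_guard() (line 85) and brackets each generated wrapper with #if ${guard} / #endif (lines 93 and 105), so an instrumented wrapper only exists when the architecture provides the matching arch_ primitive, or, for the unsuffixed name, when no _relaxed fallback will synthesize it (line 131). The emitted code looks roughly like the block below; the wrapper body is paraphrased, not copied from the generated atomic-instrumented.h.

```c
#if defined(arch_atomic_add_return_acquire)
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
	/* instrumentation hook for KASAN/KCSAN, then the real arch op */
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_return_acquire(i, v);
}
#endif
```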
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/i915/ |
| D | i915_vma_resource.h |
|  82 |  * @guard: The size of guard area preceding and trailing the bind.
| 129 | u32 guard;   (member)
| 194 |  * @guard: The size of the guard area preceding and trailing the bind.
| 214 | u32 guard)   (in i915_vma_resource_init(), argument)
| 232 | vma_res->guard = guard;   (in i915_vma_resource_init())
|
| /kernel/linux/linux-6.6/arch/x86/include/asm/ |
| D | cpu_entry_area.h |
|  35 | /* The exception stacks' physical storage. No guard pages required */
|  40 | /* The effective cpu entry area mapping with guard pages. */
|  95 |  * a read-only guard page. On 32-bit the GDT must be writeable, so
|  96 |  * it needs an extra guard page.
| 116 |  * Exception stacks used for IST entries with guard pages.
|
| /kernel/linux/linux-5.10/arch/x86/include/asm/ |
| D | cpu_entry_area.h |
|  35 | /* The exception stacks' physical storage. No guard pages required */
|  40 | /* The effective cpu entry area mapping with guard pages. */
|  95 |  * a read-only guard page. On 32-bit the GDT must be writeable, so
|  96 |  * it needs an extra guard page.
| 116 |  * Exception stacks used for IST entries with guard pages.
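In both kernel versions the pattern is the same: the physical backing for the exception stacks has no guard pages, while the cpu_entry_area mapping interleaves a guard page before each IST stack so an overflow faults immediately instead of corrupting the neighbouring stack. A simplified sketch of that mapped layout; member names and sizes are illustrative, not the actual macro-generated ones.

```c
/* Simplified: the guard members exist only in the mapped view and are
 * left unmapped, so any access into them faults; the physical storage
 * is declared without them. */
struct mapped_exception_stacks_sketch {
	char df_guard[4096];	/* virtual space for a guard page, never mapped */
	char df_stack[8192];	/* #DF IST stack (size illustrative)            */
	char nmi_guard[4096];
	char nmi_stack[8192];
	/* ... the remaining IST stacks repeat the same pattern ... */
};
```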
|
| /kernel/linux/linux-6.6/scripts/ |
| D | syscallnr.sh |
|  57 | guard=_ASM_$(basename "$outfile" |
|  62 | echo "#ifndef $guard"
|  63 | echo "#define $guard"
|  73 | echo "#endif /* $guard */"
|
| D | gcc-x86_32-has-stack-protector.sh |
|   5 | # -mstack-protector-guard-reg, added by
|   8 | …| $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-sym…
|
| D | syscallhdr.sh |
|  68 | guard=_UAPI_ASM_$(basename "$outfile" |
|  73 | echo "#ifndef $guard"
|  74 | echo "#define $guard"
|  97 | echo "#endif /* $guard */"
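Both syscallnr.sh (above) and syscallhdr.sh derive an include-guard macro from the output file name (_ASM_... or _UAPI_ASM_...) and wrap the generated definitions in it, so the header can be included more than once safely. The generated file therefore has this familiar shape; the guard name and syscall numbers below are just an example, not output copied from the scripts.

```c
#ifndef _UAPI_ASM_EXAMPLE_UNISTD_H	/* example guard name */
#define _UAPI_ASM_EXAMPLE_UNISTD_H

#define __NR_exit	1
#define __NR_read	3
#define __NR_write	4

#endif /* _UAPI_ASM_EXAMPLE_UNISTD_H */
```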
|
| /kernel/linux/linux-5.10/drivers/media/dvb-frontends/ |
| D | lgs8gxx_priv.h |
|  20 | u16 curr_gi; /* current guard interval */
|  42 | #define GI_MASK 0x03 /* Guard Interval Mask */
|  43 | #define GI_420 0x00 /* 1/9 Guard Interval */
|  45 | #define GI_945 0x02 /* 1/4 Guard Interval */
|
| /kernel/linux/linux-6.6/drivers/media/dvb-frontends/ |
| D | lgs8gxx_priv.h |
|  20 | u16 curr_gi; /* current guard interval */
|  42 | #define GI_MASK 0x03 /* Guard Interval Mask */
|  43 | #define GI_420 0x00 /* 1/9 Guard Interval */
|  45 | #define GI_945 0x02 /* 1/4 Guard Interval */
|
| /kernel/linux/linux-5.10/drivers/staging/fwserial/ |
| D | dma_fifo.h |
|  31 |  * Additional cache lines (ie, guard area) are used to minimize DMA
|  33 |  * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
|  36 | #define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
|  46 | int guard; /* ofs of guard area */   (member)
|
| /kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/ |
| D | rxe_verbs.h |
|  82 | spinlock_t sq_lock; /* guard queue */
|  89 | spinlock_t producer_lock; /* guard queue producer */
|  90 | spinlock_t consumer_lock; /* guard queue consumer */
| 251 | spinlock_t state_lock; /* guard requester and completer */
| 366 | spinlock_t port_lock; /* guard port */
| 396 | spinlock_t pending_lock; /* guard pending_mmaps */
| 399 | spinlock_t mmap_offset_lock; /* guard mmap_offset */
|
| /kernel/linux/linux-5.10/Documentation/arm64/ |
| D | memory.rst |
|  39 | fffffdffbfff0000 fffffdfffe5f8fff ~998MB [guard region]
|  41 | fffffdfffea00000 fffffdfffebfffff 2MB [guard region]
|  43 | fffffdffffc00000 fffffdffffdfffff 2MB [guard region]
|  45 | ffffffffffe00000 ffffffffffffffff 2MB [guard region]
|  59 | fffff81fffff0000 fffffc1ffe58ffff ~3TB [guard region]
|  61 | fffffc1ffea00000 fffffc1ffebfffff 2MB [guard region]
|  63 | fffffc1fffc00000 fffffc1fffdfffff 2MB [guard region]
|  65 | ffffffffffe00000 ffffffffffffffff 2MB [guard region]
|
| /kernel/linux/linux-5.10/arch/powerpc/oprofile/cell/ |
| D | pr_util.h |
|  48 |  * The guard pointer is an entry in the _ovly_buf_table,
|  53 |  * The guard value is stored in the _ovly_buf_table entry and
|  59 |  * _ovly_buf_table, which contains a guard value
|  63 |  * _ovly_buf_table, which contains a guard value
|
| D | vma_map.c |
| 247 |  * args are referred to as the guard pointer and the guard   (in create_vma_map())
| 249 |  * The guard pointer is an entry in the _ovly_buf_table,   (in create_vma_map())
| 254 |  * The guard value is stored in the _ovly_buf_table entry and   (in create_vma_map())
| 260 |  * _ovly_buf_table, which contains a guard value   (in create_vma_map())
| 264 |  * _ovly_buf_table, which contains a guard value   (in create_vma_map())
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | t10-pi.h |
|  14 |  * Type 1 defines the contents of the guard and reference tags
|  16 |  * Type 2 defines the contents of the guard and reference tags and
|  19 |  * Type 3 defines the contents of the guard tag only
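t10-pi.h covers the 8-byte protection-information tuple whose on-disk layout is the GUARD | APPTAG | REFTAG diagram shown in the signature.h entries above; per the comments here, Types 1 and 2 define the guard and reference tag contents while Type 3 defines only the guard. A sketch of that tuple layout (written from memory of struct t10_pi_tuple; treat the names as illustrative rather than a verbatim copy):

```c
#include <linux/types.h>

/* One 8-byte PI tuple accompanies each protected data block:
 * | GUARD (16 bits) | APPTAG (16 bits) | REFTAG (32 bits) |, big-endian. */
struct pi_tuple_sketch {
	__be16 guard_tag;	/* CRC (or checksum) over the data block          */
	__be16 app_tag;		/* application tag                                */
	__be32 ref_tag;		/* Type 1/2: expected reference, e.g. the low     */
				/* 32 bits of the target LBA                      */
};
```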
|