/lib/ |
D | atomic64.c |
    40   static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
    42           unsigned long addr = (unsigned long) v;
    49   long long atomic64_read(const atomic64_t *v)
    52           raw_spinlock_t *lock = lock_addr(v);
    56           val = v->counter;
    62   void atomic64_set(atomic64_t *v, long long i)
    65           raw_spinlock_t *lock = lock_addr(v);
    68           v->counter = i;
    74   void atomic64_##op(long long a, atomic64_t *v)          \
    77           raw_spinlock_t *lock = lock_addr(v);            \
    [all …]
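The snippet shows lib/atomic64.c's strategy for machines without native 64-bit atomics: keep the counter a plain long long and guard it with one of a small pool of spinlocks, chosen by hashing the variable's address. A minimal userspace sketch of the same idea, with pthread mutexes standing in for raw spinlocks; NR_LOCKS, the >> 4 hash shift, and the my_* names are our stand-ins, not the kernel's values:

    #include <pthread.h>
    #include <stdint.h>

    #define NR_LOCKS 16

    static pthread_mutex_t locks[NR_LOCKS];

    static void locks_init(void)        /* call once at startup */
    {
            int i;

            for (i = 0; i < NR_LOCKS; i++)
                    pthread_mutex_init(&locks[i], NULL);
    }

    struct my_atomic64 { long long counter; };

    /* Pick a lock by hashing the variable's address, so unrelated
     * counters rarely contend on the same mutex. */
    static pthread_mutex_t *lock_addr(const struct my_atomic64 *v)
    {
            uintptr_t addr = (uintptr_t)v;

            return &locks[(addr >> 4) % NR_LOCKS];
    }

    long long my_atomic64_read(const struct my_atomic64 *v)
    {
            pthread_mutex_t *lock = lock_addr(v);
            long long val;

            pthread_mutex_lock(lock);
            val = v->counter;
            pthread_mutex_unlock(lock);
            return val;
    }

    void my_atomic64_add(long long a, struct my_atomic64 *v)
    {
            pthread_mutex_t *lock = lock_addr(v);

            pthread_mutex_lock(lock);
            v->counter += a;
            pthread_mutex_unlock(lock);
    }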
|
D | iov_iter.c |
    75   #define iterate_all_kinds(i, n, v, I, B, K) {           \
    79           struct bio_vec v;                               \
    81           iterate_bvec(i, n, v, __bi, skip, (B))          \
    84           struct kvec v;                                  \
    85           iterate_kvec(i, n, v, kvec, skip, (K))          \
    88           struct iovec v;                                 \
    89           iterate_iovec(i, n, v, iov, skip, (I))          \
    94   #define iterate_and_advance(i, n, v, I, B, K) {         \
    101          struct bio_vec v;                               \
    103          iterate_bvec(i, n, v, __bi, skip, (B))          \
    [all …]
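iterate_all_kinds() re-declares v with the segment type matching the iterator's flavor (iovec, kvec, bio_vec) and expands the corresponding walking loop. A reduced sketch of the same dispatch, written as a switch over a simplified two-flavor iterator; struct my_iter and iter_for_each are illustrative, not the kernel's API, and the kernel uses macros instead so the per-segment body is inlined with the right type for v:

    #include <stddef.h>
    #include <sys/uio.h>   /* struct iovec */

    struct kvec { void *iov_base; size_t iov_len; };

    enum iter_type { ITER_IOVEC, ITER_KVEC };

    struct my_iter {
            enum iter_type type;
            union {
                    const struct iovec *iov;
                    const struct kvec *kvec;
            };
            unsigned long nr_segs;
    };

    /* Walk every segment, handing (base, len) to a callback. */
    static void iter_for_each(const struct my_iter *i,
                              void (*fn)(void *base, size_t len))
    {
            unsigned long n;

            switch (i->type) {
            case ITER_IOVEC:
                    for (n = 0; n < i->nr_segs; n++)
                            fn(i->iov[n].iov_base, i->iov[n].iov_len);
                    break;
            case ITER_KVEC:
                    for (n = 0; n < i->nr_segs; n++)
                            fn(i->kvec[n].iov_base, i->kvec[n].iov_len);
                    break;
            }
    }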
|
D | atomic64_test.c |
    26           atomic##bit##_set(&v, v0);                              \
    28           atomic##bit##_##op(val, &v);                            \
    30           WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n",       \
    31                (unsigned long long)atomic##bit##_read(&v),        \
    50           atomic##bit##_set(&v, v0);                              \
    53           BUG_ON(atomic##bit##_##op(val, &v) != r);               \
    54           BUG_ON(atomic##bit##_read(&v) != r);                    \
    59           atomic##bit##_set(&v, v0);                              \
    62           BUG_ON(atomic##bit##_##op(val, &v) != v0);              \
    63           BUG_ON(atomic##bit##_read(&v) != r);                    \
    [all …]
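The test file generates its checks with token pasting: ##bit selects the atomic variant and ##op the operation, so one macro covers every set/operate/verify combination. A self-contained sketch of that pattern against a plain stand-in op; TEST and myll_add are hypothetical names, not the kernel's:

    #include <assert.h>

    #define TEST(prefix, op, v0, val, expect) do {          \
            long long v = (v0);                             \
            prefix##_##op(&v, (val));                       \
            assert(v == (expect));                          \
    } while (0)

    /* Stand-in "atomic" op so the macro has something to paste. */
    static void myll_add(long long *v, long long a) { *v += a; }

    static void run_tests(void)
    {
            TEST(myll, add, 1, 2, 3);   /* set 1, add 2, expect 3 */
    }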
|
D | win_minmax.c |
    63           return m->s[0].v;
    69           struct minmax_sample val = { .t = t, .v = meas };
    71           if (unlikely(val.v >= m->s[0].v) ||     /* found new max? */
    75           if (unlikely(val.v >= m->s[1].v))
    77           else if (unlikely(val.v >= m->s[2].v))
    87           struct minmax_sample val = { .t = t, .v = meas };
    89           if (unlikely(val.v <= m->s[0].v) ||     /* found new min? */
    93           if (unlikely(val.v <= m->s[1].v))
    95           else if (unlikely(val.v <= m->s[2].v))
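win_minmax.c tracks a windowed extremum with just the best three samples of the window (s[0] the current best, s[1] and s[2] runners-up), so each new measurement costs at most three compares. A condensed sketch of minmax_running_max() under that reading; the kernel additionally ages s[0]/s[1] out as quarters of the window pass, which is omitted here:

    #include <stdint.h>

    struct minmax_sample { uint32_t t; uint32_t v; };
    struct minmax { struct minmax_sample s[3]; };

    static uint32_t my_running_max(struct minmax *m, uint32_t win,
                                   uint32_t t, uint32_t meas)
    {
            struct minmax_sample val = { .t = t, .v = meas };

            if (val.v >= m->s[0].v ||          /* new overall max? */
                val.t - m->s[2].t > win) {     /* whole window expired? */
                    m->s[0] = m->s[1] = m->s[2] = val;
                    return m->s[0].v;
            }

            if (val.v >= m->s[1].v)            /* new 2nd-best */
                    m->s[2] = m->s[1] = val;
            else if (val.v >= m->s[2].v)       /* new 3rd-best */
                    m->s[2] = val;

            /* Kernel: also promote s[1]/s[2] as sub-windows expire. */
            return m->s[0].v;
    }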
|
D | oid_registry.c |
    112          const unsigned char *v = data, *end = v + datasize;
    118          if (v >= end)
    121          n = *v++;
    128          while (v < end) {
    130                  n = *v++;
    136                  if (v >= end)
    138                  n = *v++;
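sprint_oid() walks BER-encoded OID subidentifiers: 7 payload bits per byte, top bit set on every byte except a component's last, and the v >= end checks guard against truncated input. A stand-alone decoder sketch along those lines (print_oid is our name for it):

    #include <stdio.h>
    #include <stddef.h>

    static int print_oid(const unsigned char *data, size_t datasize)
    {
            const unsigned char *v = data, *end = v + datasize;
            unsigned long n;

            if (v >= end)
                    return -1;

            /* First byte packs the first two components. */
            n = *v++;
            if (n < 80)
                    printf("%lu.%lu", n / 40, n % 40);
            else
                    printf("2.%lu", n - 80);

            while (v < end) {
                    n = 0;
                    do {
                            if (v >= end)
                                    return -1;  /* truncated component */
                            n = (n << 7) | (*v & 0x7f);
                    } while (*v++ & 0x80);      /* continuation bit */
                    printf(".%lu", n);
            }
            printf("\n");
            return 0;
    }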
|
D | earlycpio.c |
    73           unsigned int ch[C_NFIELDS], *chp, v;
    91           v = 0;
    93                   v <<= 4;
    98                           v += x;
    104                          v += x + 10;
    110          *chp++ = v;
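find_cpio_data() parses the newc cpio header, whose fields are fixed-width ASCII hex: v accumulates one nibble per character, with separate branches for digits and letters. A sketch of just that field parser; parse_hex8 is illustrative, and the real code also loops over all C_NFIELDS fields and validates the magic:

    #include <stdbool.h>

    static bool parse_hex8(const char *p, unsigned int *out)
    {
            unsigned int v = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    unsigned char x = p[i];

                    v <<= 4;                    /* make room for a nibble */
                    if (x >= '0' && x <= '9')
                            v += x - '0';
                    else if (x >= 'a' && x <= 'f')
                            v += x - 'a' + 10;
                    else if (x >= 'A' && x <= 'F')
                            v += x - 'A' + 10;
                    else
                            return false;       /* not a hex field */
            }
            *out = v;
            return true;
    }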
|
D | string.c |
    738  void *memset16(uint16_t *s, uint16_t v, size_t count)
    743          *xs++ = v;
    760  void *memset32(uint32_t *s, uint32_t v, size_t count)
    765          *xs++ = v;
    782  void *memset64(uint64_t *s, uint64_t v, size_t count)
    787          *xs++ = v;
    1083         u16 v, *p;
    1091         memset(p, 0xa1, 256 * 2 * sizeof(v));
    1094         v = p[k];
    1096         if (v != 0xa1a1)
    [all …]
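memset16/32/64 exist because memset() can only replicate a single byte; filling memory with a wider pattern needs a typed store loop (which is also why the selftest at line 1091 can use plain memset with 0xa1 and expect 0xa1a1). A portable stand-in plus a call site showing the difference; my_memset32 is our name:

    #include <stdint.h>
    #include <stddef.h>

    static void *my_memset32(uint32_t *s, uint32_t v, size_t count)
    {
            uint32_t *xs = s;

            while (count--)
                    *xs++ = v;      /* store the full 32-bit pattern */
            return s;
    }

    int main(void)
    {
            uint32_t words[64];

            /* memset(words, ..., ...) could only repeat one byte;
             * this stores the exact 32-bit value 64 times. */
            my_memset32(words, 0xdeadbeefu, 64);
            return 0;
    }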
|
D | inflate.c |
    145  } v;
    233  #define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; })
    355          unsigned v[N_MAX];      /* values in order of bit length */
    358          unsigned *c, *v, *x;
    369          v = stk->v;
    437          v[x[j]++] = i;
    445          p = v;                  /* grab values in bit order */
    499          *(t = &(q->v.t)) = (struct huft *)NULL;
    509          r.v.t = q;              /* pointer to this table */
    519          if (p >= v + n)
    [all …]
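Line 437, v[x[j]++] = i, is the heart of huft_build()'s setup: a counting sort that lays symbols out in order of code length, which is exactly the order a canonical Huffman decoder needs. A stand-alone sketch of that step, with arrays shaped like huft_build()'s c, x and v locals (BMAX as in inflate.c; lengths assumed <= BMAX):

    #define BMAX 16

    static void sort_by_code_length(const unsigned *len, unsigned n,
                                    unsigned *v /* out: n entries */)
    {
            unsigned c[BMAX + 1] = { 0 };   /* codes per bit length */
            unsigned x[BMAX + 1];           /* first slot per length */
            unsigned i, j;

            for (i = 0; i < n; i++)
                    c[len[i]]++;            /* histogram of lengths */

            x[1] = 0;                       /* running offsets */
            for (j = 1; j < BMAX; j++)
                    x[j + 1] = x[j] + c[j];

            for (i = 0; i < n; i++)         /* scatter, in length order */
                    if (len[i])             /* length 0 = unused symbol */
                            v[x[len[i]]++] = i;
    }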
|
D | bch.c |
    256  static inline int modulo(struct bch_control *bch, unsigned int v)
    259          while (v >= n) {
    260                  v -= n;
    261                  v = (v & n) + (v >> GF_M(bch));
    263          return v;
    269  static inline int mod_s(struct bch_control *bch, unsigned int v)
    272          return (v < n) ? v : v-n;
    564          unsigned int u, v, r;
    581          v = u;
    582          while (v) {
    [all …]
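modulo() reduces mod n = 2^m - 1 without a divide: since 2^m == 1 (mod n), any value can be folded as (low m bits) + (high bits) until it fits. The same loop stand-alone, with m passed in rather than read from struct bch_control:

    static unsigned int mod_2m_minus_1(unsigned int v, unsigned int m)
    {
            const unsigned int n = (1u << m) - 1;

            while (v >= n) {
                    v -= n;
                    /* fold: low m bits plus the overflow bits */
                    v = (v & n) + (v >> m);
            }
            return v;
    }

For example, with m = 4 (n = 15), 300 folds to 30, then 15, then 0, which is indeed 300 mod 15. The cheaper mod_s() at line 272 handles the common case where v is already known to be below 2n.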
|
D | fault-inject.c |
    55   #define atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)
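atomic_add_unless((v), -1, 0) decrements only while the counter is nonzero. Written out with C11 atomics, the wrapper amounts to a compare-exchange retry loop; dec_not_zero is our name for the sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool dec_not_zero(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(v, &old, old - 1))
                            return true;    /* we did the decrement */
                    /* old was refreshed by the failed CAS; retry */
            }
            return false;                   /* counter already zero */
    }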
|
D | debugobjects.c |
    782  static int debug_stats_show(struct seq_file *m, void *v)
|
D | Kconfig.debug |
    462  This enables checks whether a k/v free operation frees an area
|
/lib/raid6/ |
D | mktables.c |
    26           uint8_t v = 0;
    30                   v ^= a;
    35           return v;
    40           uint8_t v = 1;
    48                   v = gfmul(v, a);
    53           return v;
    59           uint8_t v;
    110          v = 1;
    116                  exptbl[i + j] = v;
    117                  printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
    [all …]
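gfmul() is a Russian-peasant multiply in GF(2^8): one bit of b per iteration, with a conditional xor of the reduction polynomial whenever a overflows. A stand-alone version for the RAID-6 field (polynomial 0x11d), plus the naive gfpow() built on it:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t gfmul(uint8_t a, uint8_t b)
    {
            uint8_t v = 0;

            while (b) {
                    if (b & 1)
                            v ^= a;         /* add a into the product */
                    /* a *= x, reduced mod x^8+x^4+x^3+x^2+1 (0x11d) */
                    a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
                    b >>= 1;
            }
            return v;
    }

    static uint8_t gfpow(uint8_t a, int n)
    {
            uint8_t v = 1;

            while (n--)
                    v = gfmul(v, a);        /* naive power, as above */
            return v;
    }

    int main(void)
    {
            /* 0x02 generates the field; x^8 reduces to 0x1d. */
            printf("2^8 = 0x%02x\n", gfpow(2, 8));
            return 0;
    }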
|
D | tilegx.uc |
    33   static inline __attribute_const__ u64 SHLBYTE(u64 v)
    36           return __insn_v1shli(v, 1);
    43   static inline __attribute_const__ u64 MASK(u64 v)
    46           return __insn_v1shrsi(v, 7);
|
D | neon.uc |
    38   static inline unative_t SHLBYTE(unative_t v)
    40           return vshlq_n_u8(v, 1);
    47   static inline unative_t MASK(unative_t v)
    49           return (unative_t)vshrq_n_s8((int8x16_t)v, 7);
    52   static inline unative_t PMUL(unative_t v, unative_t u)
    54           return (unative_t)vmulq_p8((poly8x16_t)v, (poly8x16_t)u);
|
D | altivec.uc |
    47   static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
    49           return vec_add(v,v);
    56   static inline __attribute_const__ unative_t MASK(unative_t v)
    61           return (unative_t)vec_cmpgt(zv, v);
|
D | int.uc |
    60   static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
    64           vv = (v << 1) & NBYTES(0xfe);
    72   static inline __attribute_const__ unative_t MASK(unative_t v)
    76           vv = v & NBYTES(0x80);
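SHLBYTE() and MASK() are the two halves of multiplying every byte of a machine word by 2 in GF(2^8) at once (SWAR); the tilegx/neon/altivec variants above map the same pair onto single vector instructions. A 64-bit stand-alone sketch showing how they combine; gf_mul2_bytes is our name for the composition, NBYTES(x) replicates x into every byte lane as in int.uc:

    #include <stdint.h>

    #define NBYTES(x) ((x) * 0x0101010101010101ULL)

    static uint64_t shlbyte(uint64_t v)     /* each byte << 1, no carry */
    {
            return (v << 1) & NBYTES(0xfe);
    }

    static uint64_t mask(uint64_t v)        /* 0xff where top bit set */
    {
            uint64_t vv = v & NBYTES(0x80);

            /* Per lane: 0x80 becomes 0xff, 0x00 stays 0x00; the
             * borrow from the subtraction confines each result to
             * its own byte lane. */
            return (vv << 1) - (vv >> 7);
    }

    static uint64_t gf_mul2_bytes(uint64_t v)
    {
            /* lanes that overflowed get the 0x1d reduction xored in */
            return shlbyte(v) ^ (mask(v) & NBYTES(0x1d));
    }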
|
/lib/mpi/ |
D | mpi-cmp.c |
    23   int mpi_cmp_ui(MPI u, unsigned long v)
    25           mpi_limb_t limb = v;
    44   int mpi_cmp(MPI u, MPI v)
    50           mpi_normalize(v);
    52           vsize = v->nlimbs;
    53           if (!u->sign && v->sign)
    55           if (u->sign && !v->sign)
    57           if (usize != vsize && !u->sign && !v->sign)
    59           if (usize != vsize && u->sign && v->sign)
    63           cmp = mpihelp_cmp(u->d, v->d, usize);
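mpi_cmp() orders two big numbers by checking signs first, then limb counts, and only then comparing limbs; this works because mpi_normalize() has already trimmed leading zero limbs. A simplified sketch of that decision ladder, where struct my_mpi stands in for the kernel's MPI type and limbs_cmp() approximates mpihelp_cmp():

    #include <stddef.h>

    struct my_mpi {
            int sign;                   /* 1 = negative */
            size_t nlimbs;              /* used limbs, LSB first */
            const unsigned long *d;
    };

    static int limbs_cmp(const unsigned long *a, const unsigned long *b,
                         size_t n)
    {
            while (n--)                 /* most significant limb first */
                    if (a[n] != b[n])
                            return a[n] > b[n] ? 1 : -1;
            return 0;
    }

    static int my_mpi_cmp(const struct my_mpi *u, const struct my_mpi *v)
    {
            int cmp;

            if (!u->sign && v->sign)    /* u >= 0 > v */
                    return 1;
            if (u->sign && !v->sign)    /* u < 0 <= v */
                    return -1;

            /* Same sign: more limbs means larger magnitude. */
            if (u->nlimbs != v->nlimbs)
                    cmp = u->nlimbs > v->nlimbs ? 1 : -1;
            else
                    cmp = limbs_cmp(u->d, v->d, u->nlimbs);

            return u->sign ? -cmp : cmp;    /* flip for negatives */
    }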
|
D | longlong.h |
    232  #define umul_ppmm(w1, w0, u, v)                 \
    239                   "r" ((USItype)(v)));           \
    241  #define smul_ppmm(w1, w0, u, v)                 \
    248                   "r" ((SItype)(v)));            \
    250  #define __umulsidi3(u, v)                       \
    255                   "r" ((USItype)(v)));           \
    320  #define umul_ppmm(wh, wl, u, v)                 \
    328                   "*f" ((USItype)(v)));          \
    415  #define umul_ppmm(w1, w0, u, v)                 \
    420                   "rm" ((USItype)(v)))
    [all …]
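Every umul_ppmm() variant above computes a full double-width product in per-architecture inline asm. This is the shape of the portable fallback such headers use when no asm variant applies: schoolbook multiplication in base 2^16, using only single-width multiplies (umul_ppmm32 is our name for the sketch):

    #include <stdint.h>

    static void umul_ppmm32(uint32_t *w1, uint32_t *w0,
                            uint32_t u, uint32_t v)
    {
            uint32_t ul = u & 0xffff, uh = u >> 16;
            uint32_t vl = v & 0xffff, vh = v >> 16;

            uint32_t x0 = ul * vl;      /* four 16x16 partial products */
            uint32_t x1 = ul * vh;
            uint32_t x2 = uh * vl;
            uint32_t x3 = uh * vh;

            x1 += x0 >> 16;             /* cannot overflow */
            x1 += x2;                   /* may overflow: carry into x3 */
            if (x1 < x2)
                    x3 += 0x10000;

            *w1 = x3 + (x1 >> 16);              /* high word */
            *w0 = (x1 << 16) + (x0 & 0xffff);   /* low word */
    }

A quick check: 0xffffffff * 0xffffffff yields w1 = 0xfffffffe, w0 = 0x00000001, the correct 64-bit square.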
|
/lib/lzo/ |
D | lzo1x_compress.c |
    94                   u64 v;
    95                   v = get_unaligned((const u64 *) (ip + m_len)) ^
    97                   if (unlikely(v == 0)) {
    100                          v = get_unaligned((const u64 *) (ip + m_len)) ^
    104                  } while (v == 0);
    107                  m_len += (unsigned) __builtin_ctzll(v) / 8;
    109                  m_len += (unsigned) __builtin_clzll(v) / 8;
    114                  u32 v;
    115                  v = get_unaligned((const u32 *) (ip + m_len)) ^
    117                  if (unlikely(v == 0)) {
    [all …]
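The compressor extends a match 8 bytes at a time: XOR the two windows, and the first nonzero XOR pinpoints the mismatching byte via count-trailing-zeros on little-endian (line 107) or count-leading-zeros on big-endian (line 109). The same idiom stand-alone; common_prefix is our name, and memcpy stands in for get_unaligned:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    static size_t common_prefix(const unsigned char *a,
                                const unsigned char *b, size_t n)
    {
            size_t len = 0;

            while (n - len >= 8) {
                    uint64_t va, vb, x;

                    memcpy(&va, a + len, 8);    /* safe unaligned load */
                    memcpy(&vb, b + len, 8);
                    x = va ^ vb;
                    if (x)  /* lowest set bit = first differing byte
                             * (little-endian layout assumed) */
                            return len + __builtin_ctzll(x) / 8;
                    len += 8;
            }
            while (len < n && a[len] == b[len]) /* bytewise tail */
                    len++;
            return len;
    }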
|
/lib/842/ |
D | 842_decompress.c |
    142          u64 v;
    148          ret = next_bits(p, &v, n * 8);
    154          put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out);
    157          put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out);
    160          put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out);
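do_data() writes an n-byte value to the output stream in big-endian order at a possibly unaligned address, via put_unaligned(cpu_to_beNN(...)). A portable equivalent of those three stores in one helper; put_be is our name:

    #include <stdint.h>
    #include <stddef.h>

    static void put_be(uint64_t v, unsigned char *out, size_t n)
    {
            size_t i;

            /* most significant byte first */
            for (i = 0; i < n; i++)
                    out[i] = (unsigned char)(v >> (8 * (n - 1 - i)));
    }

put_be(v, out, 2), put_be(v, out, 4) and put_be(v, out, 8) correspond to the stores at lines 154, 157 and 160.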
|
/lib/zlib_deflate/ |
D | deftree.c |
    377          int v = s->heap[k];
    386          if (smaller(tree, v, s->heap[j], s->depth)) break;
    394          s->heap[k] = v;
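pqdownheap() is a textbook sift-down with one refinement: the element being restored is held in v and written exactly once at its final position (line 394) instead of being swapped down level by level. A generic min-heap version of the same pattern, with smaller() reduced to an integer compare and sift_down as our name; the heap is 1-based in heap[1..n], as in zlib:

    static void sift_down(int *heap, int n, int k)
    {
            int v = heap[k];

            while (2 * k <= n) {
                    int j = 2 * k;              /* left child */

                    if (j < n && heap[j + 1] < heap[j])
                            j++;                /* pick smaller child */
                    if (v <= heap[j])
                            break;              /* order restored */
                    heap[k] = heap[j];          /* pull child up */
                    k = j;
            }
            heap[k] = v;                        /* single final store */
    }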
|