/include/linux/ |
D | ptr_ring.h |
    49  static inline bool __ptr_ring_full(struct ptr_ring *r)
    51          return r->queue[r->producer];
    54  static inline bool ptr_ring_full(struct ptr_ring *r)
    58          spin_lock(&r->producer_lock);
    59          ret = __ptr_ring_full(r);
    60          spin_unlock(&r->producer_lock);
    65  static inline bool ptr_ring_full_irq(struct ptr_ring *r)
    69          spin_lock_irq(&r->producer_lock);
    70          ret = __ptr_ring_full(r);
    71          spin_unlock_irq(&r->producer_lock);
    [all …]
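The matches show the lock-free full test plus its spin-locked and IRQ-safe wrappers: a slot that still holds a pointer at the producer index means the ring is full, and the double-underscore form exists for callers that already hold producer_lock. A minimal userspace sketch of that idea, with an illustrative toy_ring layout and a POSIX spinlock standing in for the kernel types (not the kernel's ptr_ring API):

    /* Toy ring: NULL in a slot means "free"; producer points at the slot the
     * next push would use, so a non-NULL entry there means the ring is full. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct toy_ring {
            void **queue;
            int size;
            int producer;
            pthread_spinlock_t producer_lock;
    };

    static bool toy_ring_full(struct toy_ring *r)
    {
            bool ret;

            pthread_spin_lock(&r->producer_lock);
            ret = r->queue[r->producer] != NULL;    /* occupied slot => full */
            pthread_spin_unlock(&r->producer_lock);
            return ret;
    }

    int main(void)
    {
            void *slots[2] = { NULL, NULL };
            struct toy_ring ring = { .queue = slots, .size = 2, .producer = 0 };

            pthread_spin_init(&ring.producer_lock, PTHREAD_PROCESS_PRIVATE);
            return toy_ring_full(&ring);    /* 0: the ring still has free slots */
    }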
|
D | refcount.h |
    127  void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
    134  static inline void refcount_set(refcount_t *r, int n)
    136          atomic_set(&r->refs, n);
    145  static inline unsigned int refcount_read(const refcount_t *r)
    147          return atomic_read(&r->refs);
    150  static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
    152          int old = refcount_read(r);
    157          } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
    163                  refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
    186  static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
    [all …]
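__refcount_add_not_zero() above is the usual "take a reference only if the count is still non-zero" compare-and-swap loop, with a warning hook for saturation. A hedged userspace sketch of the same pattern using C11 atomics; the toy_ names are illustrative and the saturation path is reduced to a comment:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_ref {
            atomic_int refs;
    };

    static bool toy_ref_add_not_zero(int i, struct toy_ref *r)
    {
            int old = atomic_load_explicit(&r->refs, memory_order_relaxed);

            do {
                    if (old == 0)
                            return false;   /* already released: don't resurrect */
            } while (!atomic_compare_exchange_weak_explicit(&r->refs, &old,
                            old + i, memory_order_relaxed, memory_order_relaxed));

            /* The kernel version additionally detects overflow here and calls
             * refcount_warn_saturate() instead of silently wrapping. */
            return true;
    }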
|
D | linear_range.h |
    40  unsigned int linear_range_values_in_range(const struct linear_range *r);
    41  unsigned int linear_range_values_in_range_array(const struct linear_range *r,
    43  unsigned int linear_range_get_max_value(const struct linear_range *r);
    45  int linear_range_get_value(const struct linear_range *r, unsigned int selector,
    47  int linear_range_get_value_array(const struct linear_range *r, int ranges,
    49  int linear_range_get_selector_low(const struct linear_range *r,
    52  int linear_range_get_selector_high(const struct linear_range *r,
    55  void linear_range_get_selector_within(const struct linear_range *r,
    57  int linear_range_get_selector_low_array(const struct linear_range *r,
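Only prototypes match here. For orientation, a hedged sketch of the selector-to-value mapping these helpers expose, assuming the conventional linear_range layout (base value, selector bounds, step); the toy_ struct is re-declared so the example stands alone and is not the kernel definition:

    #include <errno.h>
    #include <stdio.h>

    struct toy_linear_range {
            unsigned int min;      /* value at min_sel */
            unsigned int min_sel;  /* lowest selector */
            unsigned int max_sel;  /* highest selector */
            unsigned int step;     /* value increment per selector step */
    };

    static int toy_linear_range_get_value(const struct toy_linear_range *r,
                                          unsigned int selector, unsigned int *val)
    {
            if (selector < r->min_sel || selector > r->max_sel)
                    return -EINVAL;
            *val = r->min + (selector - r->min_sel) * r->step;
            return 0;
    }

    int main(void)
    {
            /* e.g. a regulator: selectors 16..62 map to 800000 uV in 25000 uV steps */
            struct toy_linear_range range = { .min = 800000, .min_sel = 16,
                                              .max_sel = 62, .step = 25000 };
            unsigned int uv;

            if (!toy_linear_range_get_value(&range, 20, &uv))
                    printf("selector 20 -> %u uV\n", uv);   /* 900000 */
            return 0;
    }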
|
D | damon.h |
    23  static inline unsigned long damon_rand(unsigned long l, unsigned long r)
    25          return l + get_random_u32_below(r - l);
    411                 struct damon_target *t, struct damon_region *r,
    414                 struct damon_target *t, struct damon_region *r,
    551  static inline struct damon_region *damon_next_region(struct damon_region *r)
    553          return container_of(r->list.next, struct damon_region, list);
    556  static inline struct damon_region *damon_prev_region(struct damon_region *r)
    558          return container_of(r->list.prev, struct damon_region, list);
    571  static inline unsigned long damon_sz_region(struct damon_region *r)
    573          return r->ar.end - r->ar.start;
    [all …]
|
D | resctrl.h |
    208  u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
    209  int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
    215  int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
    218  u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
    220  int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
    221  void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
    237  int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
    250  void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
    262  void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
|
D | inet_diag.h |
    13          const struct inet_diag_req_v2 *r);
    19          struct inet_diag_msg *r,
    52          const struct inet_diag_req_v2 *r);
    63  void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
    83          struct inet_diag_msg *r, int ext,
|
D | raid_class.h |
    56  raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \
    58          attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
    65  raid_get_##attr(struct raid_template *r, struct device *dev) { \
    67          attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
|
/include/drm/ |
D | drm_rect.h |
    74  #define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1
    87  #define DRM_RECT_FP_ARG(r) \
    88          drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \
    89          drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \
    90          (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \
    91          (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
    101  static inline void drm_rect_init(struct drm_rect *r, int x, int y,
    104          r->x1 = x;
    105          r->y1 = y;
    106          r->x2 = x + width;
    [all …]
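DRM_RECT_FP_ARG unpacks 16.16 fixed-point coordinates for printing: the integer part is value >> 16, and (value & 0xffff) * 15625 >> 10 equals frac * 1000000 / 65536, i.e. the fraction expressed in millionths. A small standalone check of that arithmetic (plain C, no drm_rect needed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t w = (123 << 16) | 0x8000;       /* 123.5 in 16.16 fixed point */

            printf("%d.%06u\n", w >> 16,
                   (unsigned)(((w & 0xffff) * 15625) >> 10));
            /* prints 123.500000: 0x8000 * 15625 >> 10 == 500000 */
            return 0;
    }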
|
/include/media/ |
D | v4l2-rect.h |
    18  static inline void v4l2_rect_set_size_to(struct v4l2_rect *r,
    21          r->width = size->width;
    22          r->height = size->height;
    30  static inline void v4l2_rect_set_min_size(struct v4l2_rect *r,
    33          if (r->width < min_size->width)
    34                  r->width = min_size->width;
    35          if (r->height < min_size->height)
    36                  r->height = min_size->height;
    44  static inline void v4l2_rect_set_max_size(struct v4l2_rect *r,
    47          if (r->width > max_size->width)
    [all …]
|
/include/asm-generic/bitops/ |
D | ffs.h |
    15          int r = 1;
    21                  r += 16;
    25                  r += 8;
    29                  r += 4;
    33                  r += 2;
    37                  r += 1;
    39          return r;
|
D | fls.h |
    15          int r = 32;
    21                  r -= 16;
    25                  r -= 8;
    29                  r -= 4;
    33                  r -= 2;
    37                  r -= 1;
    39          return r;
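Both generic implementations above are the same binary search: test one half of the remaining word, shift it away if the interesting bit is not there, and accumulate a 1-based position (ffs counts up from 1 toward the least significant set bit, fls counts down from 32 toward the most significant one, and both return 0 for a zero input). A standalone restatement of that idea in plain C, with toy_ names:

    #include <stdint.h>
    #include <stdio.h>

    static int toy_ffs(uint32_t x)
    {
            int r = 1;

            if (!x)
                    return 0;
            if (!(x & 0xffff)) { x >>= 16; r += 16; }
            if (!(x & 0xff))   { x >>= 8;  r += 8;  }
            if (!(x & 0xf))    { x >>= 4;  r += 4;  }
            if (!(x & 3))      { x >>= 2;  r += 2;  }
            if (!(x & 1))      { r += 1; }
            return r;
    }

    static int toy_fls(uint32_t x)
    {
            int r = 32;

            if (!x)
                    return 0;
            if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
            if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
            if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
            if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
            if (!(x & 0x80000000u)) { r -= 1; }
            return r;
    }

    int main(void)
    {
            printf("%d %d\n", toy_ffs(0x40), toy_fls(0x40));        /* 7 7 */
            return 0;
    }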
|
/include/crypto/ |
D | b128ops.h |
    60  static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
    62          r->a = p->a ^ q->a;
    63          r->b = p->b ^ q->b;
    66  static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
    68          r->a = p->a ^ q->a;
    69          r->b = p->b ^ q->b;
|
D | gf128mul.h |
    182  static inline void gf128mul_x_lle(be128 *r, const be128 *x)
    191          r->b = cpu_to_be64((b >> 1) | (a << 63));
    192          r->a = cpu_to_be64((a >> 1) ^ _tt);
    195  static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
    203          r->a = cpu_to_be64((a << 1) | (b >> 63));
    204          r->b = cpu_to_be64((b << 1) ^ _tt);
    208  static inline void gf128mul_x_ble(le128 *r, const le128 *x)
    216          r->a = cpu_to_le64((a << 1) | (b >> 63));
    217          r->b = cpu_to_le64((b << 1) ^ _tt);
    230  void gf128mul_x8_ble(le128 *r, const le128 *x);
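These helpers double a 128-bit field element under different byte/bit orderings: shift the whole value by one bit and fold the bit that falls off back in through the _tt reduction term, whose computation is elided from this listing. A hedged standalone sketch of the little-endian "ble" case, assuming _tt is the conventional 0x87 reduction constant used for XTS-style tweak doubling; struct and function names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_le128 {
            uint64_t b;     /* low 64 bits */
            uint64_t a;     /* high 64 bits */
    };

    static void toy_gf128mul_x_ble(struct toy_le128 *r, const struct toy_le128 *x)
    {
            uint64_t a = x->a, b = x->b;
            uint64_t tt = (a & (1ULL << 63)) ? 0x87 : 0;    /* reduce on carry out */

            r->a = (a << 1) | (b >> 63);    /* shift the 128-bit value left by one */
            r->b = (b << 1) ^ tt;           /* fold the carry back in */
    }

    int main(void)
    {
            struct toy_le128 t = { .b = 1, .a = 0 }, out;

            toy_gf128mul_x_ble(&out, &t);
            printf("%016llx %016llx\n", (unsigned long long)out.a,
                   (unsigned long long)out.b);      /* ...0000 ...0002 */
            return 0;
    }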
|
D | aria.h |
    309  static inline u32 rotl32(u32 v, u32 r)
    311          return ((v << r) | (v >> (32 - r)));
    314  static inline u32 rotr32(u32 v, u32 r)
    316          return ((v >> r) | (v << (32 - r)));
    437          int r = n % 32;
    440                  ((y[q % 4]) >> r) ^
    441                  ((y[(q + 3) % 4]) << (32 - r));
    443                  ((y[(q + 1) % 4]) >> r) ^
    444                  ((y[q % 4]) << (32 - r));
    446                  ((y[(q + 2) % 4]) >> r) ^
    [all …]
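rotl32()/rotr32() above are plain 32-bit rotates; note that the (32 - r) shift is undefined in C for r == 0, so these helpers assume a non-zero rotation amount. A tiny standalone check using the masked form, which is the common portable way to also tolerate r == 0:

    #include <stdint.h>
    #include <stdio.h>

    /* Masked rotate: well defined for any r, including 0. */
    static uint32_t toy_rotl32(uint32_t v, unsigned int r)
    {
            r &= 31;
            return (v << r) | (v >> ((32 - r) & 31));
    }

    int main(void)
    {
            printf("%08x\n", (unsigned)toy_rotl32(0x80000001u, 1));   /* 00000003 */
            return 0;
    }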
|
/include/soc/arc/ |
D | aux.h |
    11  #define read_aux_reg(r) __builtin_arc_lr(r)
    14  #define write_aux_reg(r, v) __builtin_arc_sr((unsigned int)(v), r)
    18  static inline int read_aux_reg(u32 r)
    27  static inline void write_aux_reg(u32 r, u32 v)
|
/include/trace/events/ |
D | damon.h |
    15          struct damon_region *r, unsigned int nr_regions),
    17          TP_ARGS(t, target_id, r, nr_regions),
    31          __entry->start = r->ar.start;
    32          __entry->end = r->ar.end;
    33          __entry->nr_accesses = r->nr_accesses;
    34          __entry->age = r->age;
|
/include/trace/hooks/ |
D | logbuf.h |
    16          TP_PROTO(struct printk_ringbuffer *rb, struct printk_record *r),
    17          TP_ARGS(rb, r))
    20          TP_PROTO(struct printk_record *r, size_t text_len),
    21          TP_ARGS(r, text_len))
|
/include/xen/ |
D | hvm.h |
    42          int r;
    46          r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
    47          if (r < 0) {
    49                          param_name(idx), idx, r);
    50                  return r;
    53          return r;
|
/include/math-emu/ |
D | double.h |
    117  #define FP_CMP_D(r,X,Y,un) _FP_CMP(D,2,r,X,Y,un)
    118  #define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,2,r,X,Y)
    120  #define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,2,r,X,rsz,rsg)
    121  #define FP_TO_INT_ROUND_D(r,X,rsz,rsg) _FP_TO_INT_ROUND(D,2,r,X,rsz,rsg)
    122  #define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,2,X,r,rs,rt)
    192  #define FP_CMP_D(r,X,Y,un) _FP_CMP(D,1,r,X,Y,un)
    193  #define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,1,r,X,Y)
    195  #define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,1,r,X,rsz,rsg)
    196  #define FP_TO_INT_ROUND_D(r,X,rsz,rsg) _FP_TO_INT_ROUND(D,1,r,X,rsz,rsg)
    197  #define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,1,X,r,rs,rt)
|
D | quad.h |
    123  #define FP_CMP_Q(r,X,Y,un) _FP_CMP(Q,4,r,X,Y,un)
    124  #define FP_CMP_EQ_Q(r,X,Y) _FP_CMP_EQ(Q,4,r,X,Y)
    126  #define FP_TO_INT_Q(r,X,rsz,rsg) _FP_TO_INT(Q,4,r,X,rsz,rsg)
    127  #define FP_TO_INT_ROUND_Q(r,X,rsz,rsg) _FP_TO_INT_ROUND(Q,4,r,X,rsz,rsg)
    128  #define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,4,X,r,rs,rt)
    196  #define FP_CMP_Q(r,X,Y,un) _FP_CMP(Q,2,r,X,Y,un)
    197  #define FP_CMP_EQ_Q(r,X,Y) _FP_CMP_EQ(Q,2,r,X,Y)
    199  #define FP_TO_INT_Q(r,X,rsz,rsg) _FP_TO_INT(Q,2,r,X,rsz,rsg)
    200  #define FP_TO_INT_ROUND_Q(r,X,rsz,rsg) _FP_TO_INT_ROUND(Q,2,r,X,rsz,rsg)
    201  #define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,2,X,r,rs,rt)
|
D | single.h |
    106  #define FP_CMP_S(r,X,Y,un) _FP_CMP(S,1,r,X,Y,un)
    107  #define FP_CMP_EQ_S(r,X,Y) _FP_CMP_EQ(S,1,r,X,Y)
    109  #define FP_TO_INT_S(r,X,rsz,rsg) _FP_TO_INT(S,1,r,X,rsz,rsg)
    110  #define FP_TO_INT_ROUND_S(r,X,rsz,rsg) _FP_TO_INT_ROUND(S,1,r,X,rsz,rsg)
    111  #define FP_FROM_INT_S(X,r,rs,rt) _FP_FROM_INT(S,1,X,r,rs,rt)
|
D | op-common.h |
    657  #define _FP_TO_INT(fs, wc, r, X, rsize, rsigned) \
    667          r = 0; \
    679          r = 0; \
    683          _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
    688          r = 1; \
    689          r <<= rsize - 1; \
    690          r -= 1 - X##_s; \
    694          r = 0; \
    696          r = ~r; \
    704          _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
    [all …]
|
/include/linux/mtd/ |
D | map.h |
    272          map_word r; \
    275                  r.x[i] = (val1).x[i] & (val2).x[i]; \
    276          r; \
    281          map_word r; \
    284                  r.x[i] = (val1).x[i] & ~(val2).x[i]; \
    285          r; \
    290          map_word r; \
    293                  r.x[i] = (val1).x[i] | (val2).x[i]; \
    294          r; \
    323          map_word r;
    [all …]
|
/include/video/ |
D | tgafb.h |
    228  TGA_WRITE_REG(struct tga_par *par, u32 v, u32 r)
    230          writel(v, par->tga_regs_base +r);
    234  TGA_READ_REG(struct tga_par *par, u32 r)
    236          return readl(par->tga_regs_base +r);
    240  BT485_WRITE(struct tga_par *par, u8 v, u8 r)
    242          TGA_WRITE_REG(par, r, TGA_RAMDAC_SETUP_REG);
    243          TGA_WRITE_REG(par, v | (r << 8), TGA_RAMDAC_REG);
|
/include/linux/mfd/ |
D | tmio.h |
    15  #define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
    21  #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
|