| /include/linux/ |
| D | bvec.h |
    101 #define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])
    104 #define mp_bvec_iter_page(bvec, iter) \
    105 	(__bvec_iter_bvec((bvec), (iter))->bv_page)
    107 #define mp_bvec_iter_len(bvec, iter) \
    108 	min((iter).bi_size, \
    109 	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
    111 #define mp_bvec_iter_offset(bvec, iter) \
    112 	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
    114 #define mp_bvec_iter_page_idx(bvec, iter) \
    115 	(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
    [all …]
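
A minimal sketch of how these macros are typically consumed (not part of the excerpt above): given a bio_vec array and a `struct bvec_iter`, the `mp_bvec_iter_*` helpers resolve the current position into a page/offset/length triple, with `bi_bvec_done` counting bytes already consumed in the current multi-page segment. The helper name `show_current_segment` is hypothetical.

```c
#include <linux/bvec.h>
#include <linux/printk.h>

/* Illustrative only: report the multi-page segment at the iterator's
 * current position. */
static void show_current_segment(const struct bio_vec *bvecs,
				 struct bvec_iter iter)
{
	struct page *page = mp_bvec_iter_page(bvecs, iter);
	unsigned int len = mp_bvec_iter_len(bvecs, iter);
	unsigned int off = mp_bvec_iter_offset(bvecs, iter);

	pr_info("segment: page=%p off=%u len=%u\n", page, off, len);
}
```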
|
| D | iov_iter.h |
    24 size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
    27 	void __user *base = iter->ubuf;
    30 	remain = step(base + iter->iov_offset, 0, len, priv, priv2);
    32 	iter->iov_offset += progress;
    33 	iter->count -= progress;
    41 size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
    44 	const struct iovec *p = iter->__iov;
    45 	size_t progress = 0, skip = iter->iov_offset;
    64 	iter->nr_segs -= p - iter->__iov;
    65 	iter->__iov = p;
    [all …]
|
| D | radix-tree.h |
    233 			struct radix_tree_iter *iter, void __rcu **slot);
    249 			const struct radix_tree_iter *iter, unsigned int tag);
    264 			struct radix_tree_iter *iter, gfp_t gfp,
    281 radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
    291 	iter->index = 0;
    292 	iter->next_index = start;
    310 			struct radix_tree_iter *iter, unsigned flags);
    324 			struct radix_tree_iter *iter, unsigned long index)
    326 	radix_tree_iter_init(iter, index);
    327 	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
    [all …]
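
A usage sketch, assuming the `radix_tree_for_each_slot()` helper from the same header (it is not in the excerpt above): it is the usual wrapper around `radix_tree_iter_init()`/`radix_tree_next_chunk()`. The function name `dump_tree` is hypothetical.

```c
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

/* Illustrative only: walk every populated slot of a radix tree under the
 * RCU read lock, starting at index 0. */
static void dump_tree(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0)
		pr_info("index %lu -> %p\n", iter.index,
			rcu_dereference_raw(*slot));
	rcu_read_unlock();
}
```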
|
| D | bio.h |
    26 #define bio_iter_iovec(bio, iter) \
    27 	bvec_iter_bvec((bio)->bi_io_vec, (iter))
    29 #define bio_iter_page(bio, iter) \
    30 	bvec_iter_page((bio)->bi_io_vec, (iter))
    31 #define bio_iter_len(bio, iter) \
    32 	bvec_iter_len((bio)->bi_io_vec, (iter))
    33 #define bio_iter_offset(bio, iter) \
    34 	bvec_iter_offset((bio)->bi_io_vec, (iter))
    40 #define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
    41 #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
    [all …]
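
A minimal sketch, assuming the `bio_for_each_segment()` macro from the same header (not shown in the excerpt): it resolves each position through `bio_iter_iovec()`, and `bvec_iter_sectors()` converts the remaining byte count to 512-byte sectors. The helper name `count_bio` is hypothetical.

```c
#include <linux/bio.h>
#include <linux/printk.h>

/* Illustrative only: sum the segment lengths of a bio and report its size
 * in both bytes and sectors. */
static void count_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	pr_info("%u bytes, %u sectors\n", bytes,
		bvec_iter_sectors(bio->bi_iter));
}
```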
|
| D | devcoredump.h |
    29 	struct scatterlist *iter;
    33 	iter = table;
    34 	for_each_sg(table, iter, sg_nents(table), i) {
    35 		page = sg_page(iter);
    41 	iter = table;
    43 	while (!sg_is_last(iter)) {
    44 		iter++;
    45 		if (sg_is_chain(iter)) {
    46 			iter = sg_chain_ptr(iter);
    48 		delete_iter = iter;
|
| D | kmsg_dump.h |
    71 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
    74 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
    77 void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
    89 static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
    95 static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
    101 static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
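
A sketch of the usual consumer, assuming the recent `kmsg_dumper` callback form that takes only `(dumper, reason)`; the remaining `kmsg_dump_get_line()` parameters (line buffer, size, returned length) are not visible in the truncated prototypes above. `my_dump` is a hypothetical dumper callback.

```c
#include <linux/kmsg_dump.h>

/* Illustrative only: replay the kernel log line by line from a dump
 * callback. kmsg_dump_rewind() resets the iterator; kmsg_dump_get_line()
 * copies one record per call until the log is exhausted. */
static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		;	/* hand 'line'/'len' off to the crash log backend */
}
```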
|
| D | dma-fence-chain.h |
    120 #define dma_fence_chain_for_each(iter, head) \
    121 	for (iter = dma_fence_get(head); iter; \
    122 	     iter = dma_fence_chain_walk(iter))
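
A usage sketch (assumptions: `dma_fence_is_signaled()` and `dma_fence_put()` from `<linux/dma-fence.h>`, and the hypothetical helper name `chain_contains_unsignaled`). The macro above takes its own reference via `dma_fence_get()` and `dma_fence_chain_walk()` carries it forward, so a caller that breaks out early still holds a reference that must be dropped.

```c
#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Illustrative only: walk a fence chain and stop at the first fence that
 * has not signaled yet. */
static bool chain_contains_unsignaled(struct dma_fence *head)
{
	struct dma_fence *fence;

	dma_fence_chain_for_each(fence, head) {
		if (!dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);	/* drop the walk's reference */
			return true;
		}
	}
	return false;	/* loop ended with fence == NULL, nothing to put */
}
```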
|
| D | generic-radix-tree.h |
    312 static inline void __genradix_iter_advance(struct genradix_iter *iter,
    315 	if (iter->offset + obj_size < iter->offset) {
    316 		iter->offset = SIZE_MAX;
    317 		iter->pos = SIZE_MAX;
    321 	iter->offset += obj_size;
    324 	    (iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE)
    325 		iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE);
    327 	iter->pos++;
    333 static inline void __genradix_iter_rewind(struct genradix_iter *iter,
    336 	if (iter->offset == 0 ||
    [all …]
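
A sketch of typical use, assuming the `GENRADIX()` declaration macro and `genradix_for_each()` iterator from the same header (neither appears in the excerpt); `struct foo`, `foos` and `walk_foos` are hypothetical. `__genradix_iter_advance()` above is what steps the iterator object by object, skipping node padding when an object would straddle a node boundary.

```c
#include <linux/generic-radix-tree.h>
#include <linux/printk.h>

struct foo {
	int val;
};

/* Illustrative only: a sparse array of struct foo backed by a generic
 * radix tree, walked in index order. */
static GENRADIX(struct foo) foos;

static void walk_foos(void)
{
	struct genradix_iter iter;
	struct foo *p;

	genradix_for_each(&foos, iter, p)
		pr_info("pos %zu: val=%d\n", iter.pos, p->val);
}
```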
|
| D | iomap.h |
    145 	struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
    232 int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
    240 static inline u64 iomap_length(const struct iomap_iter *iter)
    242 	u64 end = iter->iomap.offset + iter->iomap.length;
    244 	if (iter->srcmap.type != IOMAP_HOLE)
    245 		end = min(end, iter->srcmap.offset + iter->srcmap.length);
    246 	return min(iter->len, end - iter->pos);
    292 static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
    294 	return (iter->iomap.flags & IOMAP_F_SHARED) &&
    295 	       iter->srcmap.type == IOMAP_MAPPED;
    [all …]
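
A sketch of the canonical iomap loop, with assumptions flagged: the `->processed` field name and the exact `struct iomap_iter` members have changed across kernel versions, and `process_one_mapping`/`process_range` are hypothetical helpers. `iomap_iter()` asks the filesystem's ops for the next mapping; `iomap_length()` above bounds how much of it the body may consume.

```c
#include <linux/iomap.h>

/* Illustrative only: handle one mapping and report how many bytes were
 * consumed so the iterator can advance. */
static loff_t process_one_mapping(struct iomap_iter *iter)
{
	u64 length = iomap_length(iter);

	/* ... operate on [iter->pos, iter->pos + length) ... */
	return length;
}

static int process_range(struct inode *inode, loff_t pos, u64 len,
			 const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = process_one_mapping(&iter);

	return ret;
}
```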
|
| D | uio.h |
    85 static inline const struct iovec *iter_iov(const struct iov_iter *iter)
    87 	if (iter->iter_type == ITER_UBUF)
    88 		return (const struct iovec *) &iter->__ubuf_iovec;
    89 	return iter->__iov;
    92 #define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
    93 #define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
    100 static inline void iov_iter_save_state(struct iov_iter *iter,
    103 	state->iov_offset = iter->iov_offset;
    104 	state->count = iter->count;
    105 	state->nr_segs = iter->nr_segs;
    [all …]
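
A sketch of the save/restore pattern built on `iov_iter_save_state()` above, assuming its counterpart `iov_iter_restore()` and `copy_from_iter()` from the same subsystem (not in the excerpt); `copy_with_retry` is a hypothetical helper.

```c
#include <linux/uio.h>
#include <linux/errno.h>

/* Illustrative only: snapshot an iov_iter before consuming it so a short
 * copy can be rolled back and the whole operation retried by the caller. */
static ssize_t copy_with_retry(void *dst, size_t len, struct iov_iter *iter)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(iter, &state);
	copied = copy_from_iter(dst, len, iter);
	if (copied != len) {
		iov_iter_restore(iter, &state);	/* put the iterator back */
		return -EFAULT;
	}
	return copied;
}
```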
|
| D | crash_dump.h |
    29 ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
    139 ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
    142 static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
|
| D | netfs.h |
    242 	struct iov_iter iter;		/* Unencrypted-side iterator */
    339 				struct iov_iter *iter,
    347 				struct iov_iter *iter,
    397 ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
    398 ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
    399 ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
    400 ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
    403 ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
    408 ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
    442 size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
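
A sketch of how a netfslib-based filesystem might wire these entry points into its `file_operations`; assumptions: `netfs_file_write_iter()` (not in the excerpt), the generic VFS helpers, and the hypothetical `myfs_file_ops` name. Real filesystems usually wrap these calls to pick buffered vs. unbuffered paths based on `IOCB_DIRECT`.

```c
#include <linux/fs.h>
#include <linux/netfs.h>

/* Illustrative only: route read/write straight to the netfs helpers. */
static const struct file_operations myfs_file_ops = {
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
	.llseek		= generic_file_llseek,
	.mmap		= generic_file_mmap,
};
```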
|
| D | backing-file.h |
    28 ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
    31 ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
|
| D | ring_buffer.h |
    202 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
    205 	ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
    206 void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
    207 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
    208 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
    209 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
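
A sketch of the peek/advance pattern for an already-created iterator; how the iterator is obtained (the read_prepare/read_start split) has varied between kernel versions, so that step is deliberately left out. `drain_iter` is a hypothetical helper; `ring_buffer_event_data()` is assumed from the same header.

```c
#include <linux/ring_buffer.h>

/* Illustrative only: drain every event visible through the iterator,
 * then release it. */
static void drain_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		/* ... decode ring_buffer_event_data(event) at time 'ts' ... */
		ring_buffer_iter_advance(iter);
	}
	ring_buffer_read_finish(iter);
}
```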
|
| D | kvm_host.h |
    1108 static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
    1110 	iter->node = rb_next(iter->node);
    1111 	if (!iter->node)
    1114 	iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
    1117 static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
    1125 	iter->slots = slots;
    1131 		iter->node = NULL;
    1135 		iter->node = tmp;
    1146 	if (iter->node) {
    1152 		tmp = rb_prev(iter->node);
    [all …]
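
A sketch assuming the `kvm_for_each_memslot_in_gfn_range()` wrapper from the same header (not in the excerpt), which drives `kvm_memslot_iter_start()`/`kvm_memslot_iter_next()` over the gfn-ordered rbtree; `touch_range` is a hypothetical helper.

```c
#include <linux/kvm_host.h>

/* Illustrative only: visit every memslot that overlaps [start, end). */
static void touch_range(struct kvm_memslots *slots, gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
		struct kvm_memory_slot *slot = iter.slot;

		/* ... operate on gfns within this slot's range ... */
		(void)slot;
	}
}
```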
|
| /include/linux/mtd/ |
| D | nand.h |
    924 					struct nand_io_iter *iter)
    928 	iter->req.type = reqtype;
    929 	iter->req.mode = req->mode;
    930 	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
    931 	iter->req.ooboffs = req->ooboffs;
    932 	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
    933 	iter->dataleft = req->len;
    934 	iter->oobleft = req->ooblen;
    935 	iter->req.databuf.in = req->datbuf;
    936 	iter->req.datalen = min_t(unsigned int,
    [all …]
|
| /include/drm/ |
| D | drm_damage_helper.h |
    46 #define drm_atomic_for_each_plane_damage(iter, rect) \
    47 	while (drm_atomic_helper_damage_iter_next(iter, rect))
    74 drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
    78 drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
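
A sketch of typical use in a plane's atomic update path: the iterator is initialised from the old and new plane states, and the macro above then yields one `struct drm_rect` per damage clip (or the full plane if no damage was supplied). `flush_damage` is a hypothetical helper.

```c
#include <drm/drm_damage_helper.h>
#include <drm/drm_rect.h>

/* Illustrative only: replay only the damaged clips to the device. */
static void flush_damage(struct drm_plane_state *old_state,
			 struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* ... copy clip.x1/y1..x2/y2 to the hardware ... */
	}
}
```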
|
| D | drm_client.h |
    204 #define drm_client_for_each_connector_iter(connector, iter) \
    205 	drm_for_each_connector_iter(connector, iter) \
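
A sketch assuming the connector-list iterator begin/end helpers from `<drm/drm_connector.h>` (not in the excerpt): the macro above filters the regular connector walk down to connectors a DRM client may use. `count_client_connectors` is a hypothetical helper.

```c
#include <drm/drm_client.h>
#include <drm/drm_connector.h>

/* Illustrative only: count the connectors visible to a DRM client. */
static unsigned int count_client_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	unsigned int n = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_client_for_each_connector_iter(connector, &conn_iter)
		n++;
	drm_connector_list_iter_end(&conn_iter);

	return n;
}
```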
|
| /include/media/ |
| D | media-entity.h |
    383 #define media_entity_for_each_pad(entity, iter) \
    384 	for (iter = (entity)->pads; \
    385 	     iter < &(entity)->pads[(entity)->num_pads]; \
    386 	     ++iter)
    1196 				      struct media_pipeline_pad_iter *iter,
    1209 #define media_pipeline_for_each_pad(pipe, iter, pad) \
    1210 	for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \
    1212 	     pad = __media_pipeline_pad_iter_next((pipe), iter, pad))
    1230 				       struct media_pipeline_entity_iter *iter);
    1239 void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter);
    [all …]
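
A sketch of `media_entity_for_each_pad()` in use, assuming the standard `MEDIA_PAD_FL_SINK`/`MEDIA_PAD_FL_SOURCE` flags from the media UAPI (not in the excerpt); `count_pads` is a hypothetical helper.

```c
#include <media/media-entity.h>
#include <linux/printk.h>

/* Illustrative only: classify an entity's pads as sinks or sources. */
static void count_pads(struct media_entity *entity)
{
	struct media_pad *pad;
	unsigned int sinks = 0, sources = 0;

	media_entity_for_each_pad(entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SINK)
			sinks++;
		else if (pad->flags & MEDIA_PAD_FL_SOURCE)
			sources++;
	}
	pr_info("%s: %u sink pads, %u source pads\n",
		entity->name, sinks, sources);
}
```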
|
| /include/linux/ceph/ |
| D | messenger.h |
    133 	struct bvec_iter iter;
    140 	BUG_ON(!(it)->iter.bi_size); \
    141 	__cur_n = min((it)->iter.bi_size, __n); \
    143 	bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
    144 	if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
    147 		(it)->iter = (it)->bio->bi_iter; \
    167 	__cur_iter = (it)->iter; \
    177 	struct bvec_iter iter;
    181 	BUG_ON((n) > (it)->iter.bi_size); \
    183 	bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \
    [all …]
|
| /include/trace/ |
| D | trace_custom_events.h |
    79 trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
    82 	struct trace_seq *s = &iter->seq; \
    83 	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
    87 	field = (typeof(field))iter->ent; \
    89 	ret = trace_raw_output_prep(iter, trace_event); \
    93 	trace_event_printf(iter, print); \
|
| D | trace_events.h |
    189 trace_raw_output_##call(struct trace_iterator *iter, int flags, \
    192 	struct trace_seq *s = &iter->seq; \
    193 	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
    197 	field = (typeof(field))iter->ent; \
    199 	ret = trace_raw_output_prep(iter, trace_event); \
    203 	trace_event_printf(iter, print); \
    214 trace_raw_output_##call(struct trace_iterator *iter, int flags, \
    219 	struct trace_seq *p = &iter->tmp_seq; \
    221 	entry = iter->ent; \
    231 	return trace_output_call(iter, #call, print); \
|
| /include/net/ |
| D | bonding.h |
    83 #define bond_for_each_slave(bond, pos, iter) \
    84 	netdev_for_each_lower_private((bond)->dev, pos, iter)
    87 #define bond_for_each_slave_rcu(bond, pos, iter) \
    88 	netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
    403 	struct list_head *iter;
    406 	bond_for_each_slave(bond, tmp, iter) {
    416 	struct list_head *iter;
    419 	bond_for_each_slave(bond, tmp, iter) {
    640 	struct list_head *iter;
    643 	bond_for_each_slave(bond, tmp, iter) {
    [all …]
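
A minimal sketch of `bond_for_each_slave()` in use: because it is built on the netdev lower-device adjacency list, it needs a `struct list_head` cursor alongside the slave pointer, and the non-RCU form assumes RTNL is held. `bond_count_slaves` is a hypothetical helper.

```c
#include <net/bonding.h>

/* Illustrative only: count the slaves attached to a bond.
 * Assumes the caller holds RTNL; use bond_for_each_slave_rcu() from
 * RCU read-side contexts instead. */
static int bond_count_slaves(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;
	int n = 0;

	bond_for_each_slave(bond, slave, iter)
		n++;

	return n;
}
```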
|
| /include/linux/io_uring/ |
| D | cmd.h |
    37 			      struct iov_iter *iter, void *ioucmd);
    65 			      struct iov_iter *iter, void *ioucmd)
|
| /include/net/netfilter/ |
| D | nf_conntrack_expect.h |
    116 void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *da…
    118 		bool (*iter)(struct nf_conntrack_expect *e, void *data),
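
A sketch of the callback shape, under the assumption that `nf_ct_expect_iterate_destroy()` removes every expectation for which the `iter` callback returns true (that semantic is not visible in the truncated prototype above); `expect_belongs_to_master` and `flush_expectations` are hypothetical helpers.

```c
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>

/* Illustrative only: select expectations owned by a given master conntrack. */
static bool expect_belongs_to_master(struct nf_conntrack_expect *e, void *data)
{
	return e->master == data;
}

static void flush_expectations(struct nf_conn *master)
{
	/* Assumed semantics: entries for which the callback returns true
	 * are destroyed. */
	nf_ct_expect_iterate_destroy(expect_belongs_to_master, master);
}
```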
|