/drivers/gpu/drm/

drm_vma_manager.c
    163  struct rb_node *iter;   in drm_vma_offset_lookup_locked() local
    166  iter = mgr->vm_addr_space_rb.rb_node;   in drm_vma_offset_lookup_locked()
    169  while (likely(iter)) {   in drm_vma_offset_lookup_locked()
    170  node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);   in drm_vma_offset_lookup_locked()
    173  iter = iter->rb_right;   in drm_vma_offset_lookup_locked()
    178  iter = iter->rb_left;   in drm_vma_offset_lookup_locked()
    197  struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;   in _drm_vma_offset_add_rb() local
    201  while (likely(*iter)) {   in _drm_vma_offset_add_rb()
    202  parent = *iter;   in _drm_vma_offset_add_rb()
    203  iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);   in _drm_vma_offset_add_rb()
    [all …]

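The drm_vma_manager hits above follow the stock rbtree walk: descend from the root, take rb_left or rb_right depending on the key, and recover the enclosing structure with rb_entry(). A minimal sketch of that lookup shape (not taken from the file above), assuming a hypothetical struct my_node keyed by its start offset:

#include <linux/rbtree.h>

struct my_node {
    struct rb_node rb;
    unsigned long start;
};

/* Find the node whose start equals key, or NULL if none exists. */
static struct my_node *my_lookup(struct rb_root *root, unsigned long key)
{
    struct rb_node *iter = root->rb_node;

    while (iter) {
        struct my_node *node = rb_entry(iter, struct my_node, rb);

        if (key < node->start)
            iter = iter->rb_left;       /* go left for smaller keys */
        else if (key > node->start)
            iter = iter->rb_right;      /* go right for larger keys */
        else
            return node;                /* exact match */
    }
    return NULL;
}

The insertion path at line 197 uses the same descent with a struct rb_node ** cursor so the matching child slot can be linked in place with rb_link_node() and rebalanced with rb_insert_color().
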
/drivers/net/ethernet/mellanox/mlx4/

icm.h
     88  struct mlx4_icm_iter *iter)   in mlx4_icm_first() argument
     90  iter->icm = icm;   in mlx4_icm_first()
     91  iter->chunk = list_empty(&icm->chunk_list) ?   in mlx4_icm_first()
     94  iter->page_idx = 0;   in mlx4_icm_first()
     97  static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)   in mlx4_icm_last() argument
     99  return !iter->chunk;   in mlx4_icm_last()
    102  static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)   in mlx4_icm_next() argument
    104  if (++iter->page_idx >= iter->chunk->nsg) {   in mlx4_icm_next()
    105  if (iter->chunk->list.next == &iter->icm->chunk_list) {   in mlx4_icm_next()
    106  iter->chunk = NULL;   in mlx4_icm_next()
    [all …]

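mlx4_icm_first(), mlx4_icm_last() and mlx4_icm_next() form a three-call iterator over the DMA-mapped pages of an ICM area: first() positions on the first chunk, last() tests for the NULL chunk sentinel, next() steps through pages and chunks. A hedged sketch of the canonical loop, assuming the companion mlx4_icm_addr()/mlx4_icm_size() accessors from the same header; my_dump_icm is hypothetical:

#include <linux/kernel.h>
#include "icm.h"    /* drivers/net/ethernet/mellanox/mlx4/icm.h */

static void my_dump_icm(struct mlx4_icm *icm)
{
    struct mlx4_icm_iter iter;

    for (mlx4_icm_first(icm, &iter);
         !mlx4_icm_last(&iter);
         mlx4_icm_next(&iter))
        pr_info("ICM page at 0x%llx, %lu bytes\n",
                (unsigned long long)mlx4_icm_addr(&iter),
                mlx4_icm_size(&iter));
}

The mthca helpers further down expose the same shape for the older mthca driver.
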
/drivers/dma/ppc4xx/

adma.c
    190  struct ppc440spe_adma_desc_slot *iter)   in print_cb_list() argument
    192  for (; iter; iter = iter->hw_next)   in print_cb_list()
    193  print_cb(chan, iter->hw_desc);   in print_cb_list()
    338  struct ppc440spe_adma_desc_slot *iter;   in ppc440spe_desc_init_dma01pq() local
    352  list_for_each_entry(iter, &desc->group_list, chain_node) {   in ppc440spe_desc_init_dma01pq()
    353  hw_desc = iter->hw_desc;   in ppc440spe_desc_init_dma01pq()
    354  memset(iter->hw_desc, 0, sizeof(struct dma_cdb));   in ppc440spe_desc_init_dma01pq()
    356  if (likely(!list_is_last(&iter->chain_node,   in ppc440spe_desc_init_dma01pq()
    359  iter->hw_next = list_entry(iter->chain_node.next,   in ppc440spe_desc_init_dma01pq()
    361  clear_bit(PPC440SPE_DESC_INT, &iter->flags);   in ppc440spe_desc_init_dma01pq()
    [all …]

/drivers/s390/cio/

blacklist.c
    287  struct ccwdev_iter *iter = s->private;   in cio_ignore_proc_seq_start() local
    291  memset(iter, 0, sizeof(*iter));   in cio_ignore_proc_seq_start()
    292  iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);   in cio_ignore_proc_seq_start()
    293  iter->devno = *offset % (__MAX_SUBCHANNEL + 1);   in cio_ignore_proc_seq_start()
    294  return iter;   in cio_ignore_proc_seq_start()
    305  struct ccwdev_iter *iter;   in cio_ignore_proc_seq_next() local
    309  iter = it;   in cio_ignore_proc_seq_next()
    310  if (iter->devno == __MAX_SUBCHANNEL) {   in cio_ignore_proc_seq_next()
    311  iter->devno = 0;   in cio_ignore_proc_seq_next()
    312  iter->ssid++;   in cio_ignore_proc_seq_next()
    [all …]

/drivers/infiniband/ulp/ipoib/

ipoib_fs.c
     60  struct ipoib_mcast_iter *iter;   in ipoib_mcg_seq_start() local
     63  iter = ipoib_mcast_iter_init(file->private);   in ipoib_mcg_seq_start()
     64  if (!iter)   in ipoib_mcg_seq_start()
     68  if (ipoib_mcast_iter_next(iter)) {   in ipoib_mcg_seq_start()
     69  kfree(iter);   in ipoib_mcg_seq_start()
     74  return iter;   in ipoib_mcg_seq_start()
     80  struct ipoib_mcast_iter *iter = iter_ptr;   in ipoib_mcg_seq_next() local
     84  if (ipoib_mcast_iter_next(iter)) {   in ipoib_mcg_seq_next()
     85  kfree(iter);   in ipoib_mcg_seq_next()
     89  return iter;   in ipoib_mcg_seq_next()
    [all …]

ipoib_multicast.c
    900  struct ipoib_mcast_iter *iter;   in ipoib_mcast_iter_init() local
    902  iter = kmalloc(sizeof *iter, GFP_KERNEL);   in ipoib_mcast_iter_init()
    903  if (!iter)   in ipoib_mcast_iter_init()
    906  iter->dev = dev;   in ipoib_mcast_iter_init()
    907  memset(iter->mgid.raw, 0, 16);   in ipoib_mcast_iter_init()
    909  if (ipoib_mcast_iter_next(iter)) {   in ipoib_mcast_iter_init()
    910  kfree(iter);   in ipoib_mcast_iter_init()
    914  return iter;   in ipoib_mcast_iter_init()
    917  int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)   in ipoib_mcast_iter_next() argument
    919  struct ipoib_dev_priv *priv = netdev_priv(iter->dev);   in ipoib_mcast_iter_next()
    [all …]

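ipoib_fs.c above and qib_debugfs.c below share one seq_file shape: ->start allocates a heap iterator and advances it to *pos, ->next advances it and frees it when the list is exhausted. A minimal sketch of that pattern under stated assumptions (the my_* names are hypothetical, the cursor just counts 0..MY_COUNT-1, and ->stop frees the iterator so an early reader exit does not leak):

#include <linux/seq_file.h>
#include <linux/slab.h>

#define MY_COUNT 8
struct my_iter { long pos; };               /* hypothetical cursor */

static int my_iter_next(struct my_iter *iter)
{
    return ++iter->pos >= MY_COUNT;         /* non-zero once exhausted */
}

static void *my_seq_start(struct seq_file *s, loff_t *pos)
{
    struct my_iter *iter;
    loff_t n = *pos;

    iter = kmalloc(sizeof(*iter), GFP_KERNEL);
    if (!iter)
        return NULL;
    iter->pos = 0;

    /* Re-walk from the beginning up to *pos, as the drivers above do. */
    while (n--) {
        if (my_iter_next(iter)) {           /* ran past the end */
            kfree(iter);
            return NULL;
        }
    }
    return iter;
}

static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
    struct my_iter *iter = v;

    (*pos)++;
    if (my_iter_next(iter)) {               /* end of list: free and stop */
        kfree(iter);
        return NULL;
    }
    return iter;
}

static void my_seq_stop(struct seq_file *s, void *v)
{
    kfree(v);                               /* NULL if ->next already freed it */
}

static int my_seq_show(struct seq_file *s, void *v)
{
    struct my_iter *iter = v;

    seq_printf(s, "entry %ld\n", iter->pos);
    return 0;
}

static const struct seq_operations my_seq_ops = {
    .start = my_seq_start,
    .next  = my_seq_next,
    .stop  = my_seq_stop,
    .show  = my_seq_show,
};

A procfs or debugfs open handler would then pass my_seq_ops to seq_open().
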
/drivers/infiniband/hw/mthca/

mthca_memfree.h
    100  struct mthca_icm_iter *iter)   in mthca_icm_first() argument
    102  iter->icm = icm;   in mthca_icm_first()
    103  iter->chunk = list_empty(&icm->chunk_list) ?   in mthca_icm_first()
    106  iter->page_idx = 0;   in mthca_icm_first()
    109  static inline int mthca_icm_last(struct mthca_icm_iter *iter)   in mthca_icm_last() argument
    111  return !iter->chunk;   in mthca_icm_last()
    114  static inline void mthca_icm_next(struct mthca_icm_iter *iter)   in mthca_icm_next() argument
    116  if (++iter->page_idx >= iter->chunk->nsg) {   in mthca_icm_next()
    117  if (iter->chunk->list.next == &iter->icm->chunk_list) {   in mthca_icm_next()
    118  iter->chunk = NULL;   in mthca_icm_next()
    [all …]

/drivers/staging/lustre/lustre/lov/

lov_pool.c
    175  struct pool_iterator *iter = (struct pool_iterator *)s->private;   in pool_proc_next() local
    178  LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);   in pool_proc_next()
    181  if (*pos >= pool_tgt_count(iter->pool))   in pool_proc_next()
    185  prev_idx = iter->idx;   in pool_proc_next()
    186  down_read(&pool_tgt_rw_sem(iter->pool));   in pool_proc_next()
    187  iter->idx++;   in pool_proc_next()
    188  if (iter->idx == pool_tgt_count(iter->pool)) {   in pool_proc_next()
    189  iter->idx = prev_idx; /* we stay on the last entry */   in pool_proc_next()
    190  up_read(&pool_tgt_rw_sem(iter->pool));   in pool_proc_next()
    193  up_read(&pool_tgt_rw_sem(iter->pool));   in pool_proc_next()
    [all …]

/drivers/net/wireless/libertas/

firmware.c
     85  const struct lbs_fw_table *iter;   in load_next_firmware_from_table() local
     88  iter = priv->fw_table;   in load_next_firmware_from_table()
     90  iter = ++priv->fw_iter;   in load_next_firmware_from_table()
     98  if (!iter->helper) {   in load_next_firmware_from_table()
    104  if (iter->model != priv->fw_model) {   in load_next_firmware_from_table()
    105  iter++;   in load_next_firmware_from_table()
    109  priv->fw_iter = iter;   in load_next_firmware_from_table()
    110  do_load_firmware(priv, iter->helper, helper_firmware_cb);   in load_next_firmware_from_table()
    176  const struct lbs_fw_table *iter;   in lbs_get_firmware() local
    183  iter = fw_table;   in lbs_get_firmware()
    [all …]

/drivers/base/

class.c
    293  void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,   in class_dev_iter_init() argument
    300  klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);   in class_dev_iter_init()
    301  iter->type = type;   in class_dev_iter_init()
    317  struct device *class_dev_iter_next(struct class_dev_iter *iter)   in class_dev_iter_next() argument
    323  knode = klist_next(&iter->ki);   in class_dev_iter_next()
    327  if (!iter->type || iter->type == dev->type)   in class_dev_iter_next()
    340  void class_dev_iter_exit(struct class_dev_iter *iter)   in class_dev_iter_exit() argument
    342  klist_iter_exit(&iter->ki);   in class_dev_iter_exit()
    367  struct class_dev_iter iter;   in class_for_each_device() local
    379  class_dev_iter_init(&iter, class, start, NULL);   in class_for_each_device()
    [all …]

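class_dev_iter_init(), class_dev_iter_next() and class_dev_iter_exit() are the public way to walk every device registered on a class; class_for_each_device() (line 379) is itself built on them. A hedged sketch of a caller, assuming it already owns a valid struct class pointer; my_walk_class_devices is hypothetical:

#include <linux/device.h>

static void my_walk_class_devices(struct class *cls)
{
    struct class_dev_iter iter;
    struct device *dev;

    /* start = NULL: begin at the head; type = NULL: match any device_type */
    class_dev_iter_init(&iter, cls, NULL, NULL);
    while ((dev = class_dev_iter_next(&iter)))
        dev_info(dev, "device on class %s\n", cls->name);
    class_dev_iter_exit(&iter);
}

The iterator pins its position in the class's klist, so devices may be added or removed while the walk is in progress.
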
attribute_container.c
    182  #define klist_for_each_entry(pos, head, member, iter) \   argument
    183  	for (klist_iter_init(head, iter); (pos = ({ \
    184  		struct klist_node *n = klist_next(iter); \
    186  		({ klist_iter_exit(iter) ; NULL; }); \
    216  struct klist_iter iter;   in attribute_container_remove_device() local
    224  klist_for_each_entry(ic, &cont->containers, node, &iter) {   in attribute_container_remove_device()
    260  struct klist_iter iter;   in attribute_container_device_trigger() local
    270  klist_for_each_entry(ic, &cont->containers, node, &iter) {   in attribute_container_device_trigger()
    427  struct klist_iter iter;   in attribute_container_find_class_device() local
    429  klist_for_each_entry(ic, &cont->containers, node, &iter) {   in attribute_container_find_class_device()
    [all …]

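The local klist_for_each_entry() macro above packs klist_iter_init(), klist_next() and klist_iter_exit() into one for-loop header. Open-coded, the same walk looks roughly like the sketch below; struct my_item and my_list are hypothetical names, not part of attribute_container.c:

#include <linux/klist.h>
#include <linux/kernel.h>

struct my_item {
    struct klist_node node;
    int id;
};

static void my_walk_klist(struct klist *my_list)
{
    struct klist_iter iter;
    struct klist_node *knode;

    klist_iter_init(my_list, &iter);
    while ((knode = klist_next(&iter))) {
        struct my_item *item = container_of(knode, struct my_item, node);

        pr_info("item %d\n", item->id);
    }
    klist_iter_exit(&iter);
}
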
/drivers/md/bcache/

bset.c
     55  struct btree_iter iter;   in __bch_count_data() local
     59  for_each_key(b, k, &iter)   in __bch_count_data()
     68  struct btree_iter iter;   in __bch_check_keys() local
     71  for_each_key(b, k, &iter) {   in __bch_check_keys()
    109  static void bch_btree_iter_next_check(struct btree_iter *iter)   in bch_btree_iter_next_check() argument
    111  struct bkey *k = iter->data->k, *next = bkey_next(k);   in bch_btree_iter_next_check()
    113  if (next < iter->data->end &&   in bch_btree_iter_next_check()
    114  bkey_cmp(k, iter->b->ops->is_extents ?   in bch_btree_iter_next_check()
    116  bch_dump_bucket(iter->b);   in bch_btree_iter_next_check()
    123  static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}   in bch_btree_iter_next_check() argument
    [all …]

extents.c
     29  static void sort_key_next(struct btree_iter *iter,   in sort_key_next() argument
     35  *i = iter->data[--iter->used];   in sort_key_next()
    227  struct btree_iter *iter,   in bch_btree_ptr_insert_fixup() argument
    264  static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,   in bch_extent_sort_fixup() argument
    267  while (iter->used > 1) {   in bch_extent_sort_fixup()
    268  struct btree_iter_set *top = iter->data, *i = top + 1;   in bch_extent_sort_fixup()
    270  if (iter->used > 2 &&   in bch_extent_sort_fixup()
    278  sort_key_next(iter, i);   in bch_extent_sort_fixup()
    279  heap_sift(iter, i - top, bch_extent_sort_cmp);   in bch_extent_sort_fixup()
    285  sort_key_next(iter, i);   in bch_extent_sort_fixup()
    [all …]

btree.h
    200  #define for_each_cached_btree(b, c, iter) \   argument
    201  	for (iter = 0; \
    202  	     iter < ARRAY_SIZE((c)->bucket_hash); \
    203  	     iter++) \
    204  		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

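for_each_cached_btree() is a nested iteration: an integer index walks the bucket array while hlist_for_each_entry_rcu() walks each hash chain, so callers must hold an RCU read-side lock. A generic sketch of the same shape, with struct my_obj and the my_table array as hypothetical names:

#include <linux/rculist.h>
#include <linux/kernel.h>

#define MY_BUCKETS 16

struct my_obj {
    struct hlist_node hash;
    int key;
};

static struct hlist_head my_table[MY_BUCKETS];

static void my_walk_all(void)
{
    struct my_obj *obj;
    unsigned int i;

    rcu_read_lock();
    for (i = 0; i < ARRAY_SIZE(my_table); i++)
        hlist_for_each_entry_rcu(obj, &my_table[i], hash)
            pr_info("bucket %u: key %d\n", i, obj->key);
    rcu_read_unlock();
}
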
/drivers/infiniband/hw/qib/

qib_debugfs.c
    193  struct qib_qp_iter *iter;   in DEBUGFS_FILE() local
    197  iter = qib_qp_iter_init(s->private);   in DEBUGFS_FILE()
    198  if (!iter)   in DEBUGFS_FILE()
    202  if (qib_qp_iter_next(iter)) {   in DEBUGFS_FILE()
    203  kfree(iter);   in DEBUGFS_FILE()
    208  return iter;   in DEBUGFS_FILE()
    214  struct qib_qp_iter *iter = iter_ptr;   in _qp_stats_seq_next() local
    218  if (qib_qp_iter_next(iter)) {   in _qp_stats_seq_next()
    219  kfree(iter);   in _qp_stats_seq_next()
    223  return iter;   in _qp_stats_seq_next()
    [all …]

/drivers/gpu/drm/radeon/

drm_buffer.h
    130  int iter = buffer->iterator + offset * 4;   in drm_buffer_pointer_to_dword() local
    131  return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];   in drm_buffer_pointer_to_dword()
    144  int iter = buffer->iterator + offset;   in drm_buffer_pointer_to_byte() local
    145  return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];   in drm_buffer_pointer_to_byte()

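Both helpers split one linear offset across a page array: because PAGE_SIZE is a power of two, iter & (PAGE_SIZE - 1) equals iter % PAGE_SIZE, and iter / PAGE_SIZE selects the page. A small sketch of the same decomposition with a hypothetical page array (not part of drm_buffer.h):

#include <linux/mm.h>    /* PAGE_SIZE */

/* Return the byte at linear offset "off" inside a paged buffer. */
static char *my_paged_ptr(char **pages, unsigned long off)
{
    unsigned long page = off / PAGE_SIZE;           /* which page */
    unsigned long rest = off & (PAGE_SIZE - 1);     /* == off % PAGE_SIZE */

    return &pages[page][rest];
}
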
/drivers/dma/

mv_xor.c
    239  struct mv_xor_desc_slot *iter, *_iter;   in mv_xor_clean_completed_slots() local
    242  list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,   in mv_xor_clean_completed_slots()
    245  if (async_tx_test_ack(&iter->async_tx)) {   in mv_xor_clean_completed_slots()
    246  list_del(&iter->completed_node);   in mv_xor_clean_completed_slots()
    247  mv_xor_free_slots(mv_chan, iter);   in mv_xor_clean_completed_slots()
    276  struct mv_xor_desc_slot *iter, *_iter;   in mv_xor_slot_cleanup() local
    291  list_for_each_entry_safe(iter, _iter, &mv_chan->chain,   in mv_xor_slot_cleanup()
    295  hw_desc = iter->hw_desc;   in mv_xor_slot_cleanup()
    297  cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,   in mv_xor_slot_cleanup()
    301  mv_xor_clean_slot(iter, mv_chan);   in mv_xor_slot_cleanup()
    [all …]

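Both cleanup paths above rely on the safe-iteration idiom: list_for_each_entry_safe() keeps a lookahead cursor (_iter) so the current entry may be unlinked and freed during the walk. A sketch of that idiom with hypothetical struct my_desc entries:

#include <linux/list.h>
#include <linux/slab.h>

struct my_desc {
    struct list_head node;
    bool done;
};

static void my_reap_completed(struct list_head *completed)
{
    struct my_desc *iter, *_iter;

    list_for_each_entry_safe(iter, _iter, completed, node) {
        if (!iter->done)
            continue;
        list_del(&iter->node);  /* safe: _iter already points past us */
        kfree(iter);
    }
}
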
iop-adma.c
    120  struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;   in __iop_adma_slot_cleanup() local
    130  list_for_each_entry_safe(iter, _iter, &iop_chan->chain,   in __iop_adma_slot_cleanup()
    134  iter->async_tx.cookie, iter->idx, busy,   in __iop_adma_slot_cleanup()
    135  iter->async_tx.phys, iop_desc_get_next_desc(iter),   in __iop_adma_slot_cleanup()
    136  async_tx_test_ack(&iter->async_tx));   in __iop_adma_slot_cleanup()
    151  if (iter->async_tx.phys == current_desc) {   in __iop_adma_slot_cleanup()
    153  if (busy || iop_desc_get_next_desc(iter))   in __iop_adma_slot_cleanup()
    159  slot_cnt = iter->slot_cnt;   in __iop_adma_slot_cleanup()
    160  slots_per_op = iter->slots_per_op;   in __iop_adma_slot_cleanup()
    170  grp_start = iter;   in __iop_adma_slot_cleanup()
    [all …]

/drivers/net/wireless/ath/carl9170/

debug.c
    305  struct carl9170_sta_tid *iter;   in carl9170_debugfs_ampdu_state_read() local
    311  list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {   in carl9170_debugfs_ampdu_state_read()
    313  spin_lock_bh(&iter->lock);   in carl9170_debugfs_ampdu_state_read()
    316  cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,   in carl9170_debugfs_ampdu_state_read()
    317  iter->max, iter->state, iter->counter);   in carl9170_debugfs_ampdu_state_read()
    322  iter->bitmap, CARL9170_BAW_BITS);   in carl9170_debugfs_ampdu_state_read()
    333  offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));   in carl9170_debugfs_ampdu_state_read()
    336  offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %   in carl9170_debugfs_ampdu_state_read()
    341  " currently queued:%d\n", skb_queue_len(&iter->queue));   in carl9170_debugfs_ampdu_state_read()
    344  skb_queue_walk(&iter->queue, skb) {   in carl9170_debugfs_ampdu_state_read()
    [all …]

/drivers/scsi/qla4xxx/

ql4_attr.c
    129  struct sysfs_entry *iter;   in qla4_8xxx_alloc_sysfs_attr() local
    132  for (iter = bin_file_entries; iter->name; iter++) {   in qla4_8xxx_alloc_sysfs_attr()
    134  iter->attr);   in qla4_8xxx_alloc_sysfs_attr()
    138  iter->name, ret);   in qla4_8xxx_alloc_sysfs_attr()
    145  struct sysfs_entry *iter;   in qla4_8xxx_free_sysfs_attr() local
    147  for (iter = bin_file_entries; iter->name; iter++)   in qla4_8xxx_free_sysfs_attr()
    149  iter->attr);   in qla4_8xxx_free_sysfs_attr()

/drivers/net/bonding/

bonding.h
     89  #define bond_for_each_slave(bond, pos, iter) \   argument
     90  	netdev_for_each_lower_private((bond)->dev, pos, iter)
     93  #define bond_for_each_slave_rcu(bond, pos, iter) \   argument
     94  	netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
    350  struct list_head *iter;   in bond_slave_state_change() local
    353  bond_for_each_slave(bond, tmp, iter) {   in bond_slave_state_change()
    363  struct list_head *iter;   in bond_slave_state_notify() local
    366  bond_for_each_slave(bond, tmp, iter) {   in bond_slave_state_notify()
    574  struct list_head *iter;   in bond_slave_has_mac() local
    577  bond_for_each_slave(bond, tmp, iter)   in bond_slave_has_mac()
    [all …]

bond_main.c
    285  struct list_head *iter;   in bond_vlan_rx_add_vid() local
    288  bond_for_each_slave(bond, slave, iter) {   in bond_vlan_rx_add_vid()
    298  bond_for_each_slave(bond, rollback_slave, iter) {   in bond_vlan_rx_add_vid()
    317  struct list_head *iter;   in bond_vlan_rx_kill_vid() local
    320  bond_for_each_slave(bond, slave, iter)   in bond_vlan_rx_kill_vid()
    339  struct list_head *iter;   in bond_set_carrier() local
    348  bond_for_each_slave(bond, slave, iter) {   in bond_set_carrier()
    491  struct list_head *iter;   in bond_set_promiscuity() local
    502  bond_for_each_slave(bond, slave, iter) {   in bond_set_promiscuity()
    514  struct list_head *iter;   in bond_set_allmulti() local
    [all …]

/drivers/hid/

hid-wiimote-core.c
    619  const __u8 *mods, *iter;   in wiimote_modules_load() local
    625  for (iter = mods; *iter != WIIMOD_NULL; ++iter) {   in wiimote_modules_load()
    626  if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) {   in wiimote_modules_load()
    646  for (iter = mods; *iter != WIIMOD_NULL; ++iter) {   in wiimote_modules_load()
    647  ops = wiimod_table[*iter];   in wiimote_modules_load()
    668  for ( ; iter-- != mods; ) {   in wiimote_modules_load()
    669  ops = wiimod_table[*iter];   in wiimote_modules_load()
    682  const __u8 *mods, *iter;   in wiimote_modules_unload() local
    693  for (iter = mods; *iter != WIIMOD_NULL; ++iter)   in wiimote_modules_unload()
    701  for ( ; iter-- != mods; ) {   in wiimote_modules_unload()
    [all …]

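The backwards loop at lines 668 and 701 is the usual unwind-on-failure idiom: walk a table forward while setting modules up, and on error walk the already-initialized prefix backward to undo it, leaving the failed entry untouched. A generic sketch of that idiom with a hypothetical my_mod table and callbacks:

#include <linux/types.h>

struct my_mod {
    int (*probe)(void);
    void (*remove)(void);
};

static int my_load_all(const struct my_mod *mods, size_t n)
{
    const struct my_mod *iter;
    int ret;

    for (iter = mods; iter != mods + n; ++iter) {
        ret = iter->probe();
        if (ret)
            goto unwind;
    }
    return 0;

unwind:
    /* iter points at the failed entry; undo everything before it, in reverse */
    for ( ; iter-- != mods; )
        iter->remove();
    return ret;
}
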
/drivers/block/aoe/

aoecmd.c
    199  memset(&f->iter, 0, sizeof(f->iter));   in aoe_freetframe()
    297  skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)   in skb_fillup() argument
    302  __bio_for_each_segment(bv, bio, iter, iter)   in skb_fillup()
    343  ah->scnt = f->iter.bi_size >> 9;   in ata_rw_frameinit()
    344  put_lba(ah, f->iter.bi_sector);   in ata_rw_frameinit()
    353  skb_fillup(skb, f->buf->bio, f->iter);   in ata_rw_frameinit()
    355  skb->len += f->iter.bi_size;   in ata_rw_frameinit()
    356  skb->data_len = f->iter.bi_size;   in ata_rw_frameinit()
    357  skb->truesize += f->iter.bi_size;   in ata_rw_frameinit()
    385  f->iter = buf->iter;   in aoecmd_ata_rw()
    [all …]

/drivers/s390/block/

scm_blk_cluster.c
     78  struct scm_request *iter;   in scm_reserve_cluster() local
     84  list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {   in scm_reserve_cluster()
     85  if (clusters_intersect(scmrq, iter) &&   in scm_reserve_cluster()
     87  rq_data_dir(iter->request) == WRITE)) {   in scm_reserve_cluster()
    124  struct req_iterator iter;   in scm_prepare_cluster_request() local
    165  rq_for_each_segment(bv, req, iter) {   in scm_prepare_cluster_request()