/drivers/crypto/virtio/
  virtio_crypto_mgr.c
    187  struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;  in virtcrypto_get_dev_node() local
    192  list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {  in virtcrypto_get_dev_node()
    194  if ((node == dev_to_node(&tmp_dev->vdev->dev) ||  in virtcrypto_get_dev_node()
    195  dev_to_node(&tmp_dev->vdev->dev) < 0) &&  in virtcrypto_get_dev_node()
    196  virtcrypto_dev_started(tmp_dev) &&  in virtcrypto_get_dev_node()
    197  virtcrypto_algo_is_supported(tmp_dev, service, algo)) {  in virtcrypto_get_dev_node()
    198  ctr = atomic_read(&tmp_dev->ref_count);  in virtcrypto_get_dev_node()
    200  vcrypto_dev = tmp_dev;  in virtcrypto_get_dev_node()
    210  list_for_each_entry(tmp_dev,  in virtcrypto_get_dev_node()
    212  if (virtcrypto_dev_started(tmp_dev) &&  in virtcrypto_get_dev_node()
    [all …]
/drivers/crypto/intel/qat/qat_common/
  qat_crypto.c
    53  struct adf_accel_dev *accel_dev = NULL, *tmp_dev;  in qat_crypto_get_instance_node() local
    57  list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {  in qat_crypto_get_instance_node()
    60  if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||  in qat_crypto_get_instance_node()
    61  dev_to_node(&GET_DEV(tmp_dev)) < 0) &&  in qat_crypto_get_instance_node()
    62  adf_dev_started(tmp_dev) &&  in qat_crypto_get_instance_node()
    63  !list_empty(&tmp_dev->crypto_list)) {  in qat_crypto_get_instance_node()
    64  ctr = atomic_read(&tmp_dev->ref_count);  in qat_crypto_get_instance_node()
    66  accel_dev = tmp_dev;  in qat_crypto_get_instance_node()
    75  list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {  in qat_crypto_get_instance_node()
    76  if (adf_dev_started(tmp_dev) &&  in qat_crypto_get_instance_node()
    [all …]

  qat_compression.c
    57  struct adf_accel_dev *tmp_dev;  in qat_compression_get_instance_node() local
    61  tmp_dev = list_entry(itr, struct adf_accel_dev, list);  in qat_compression_get_instance_node()
    62  tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));  in qat_compression_get_instance_node()
    65  adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {  in qat_compression_get_instance_node()
    66  ctr = atomic_read(&tmp_dev->ref_count);  in qat_compression_get_instance_node()
    68  accel_dev = tmp_dev;  in qat_compression_get_instance_node()
    78  struct adf_accel_dev *tmp_dev;  in qat_compression_get_instance_node() local
    80  tmp_dev = list_entry(itr, struct adf_accel_dev, list);  in qat_compression_get_instance_node()
    81  if (adf_dev_started(tmp_dev) &&  in qat_compression_get_instance_node()
    82  !list_empty(&tmp_dev->compression_list)) {  in qat_compression_get_instance_node()
    [all …]
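The virtio-crypto and QAT hits above share one selection pattern: walk the global device list, prefer a started device on the requested NUMA node, and break ties by the lowest reference count. A minimal sketch of that pattern follows; it is not the kernel's actual code, and struct accel_dev, its fields, and dev_started() are hypothetical placeholders.

/*
 * Hypothetical sketch of the NUMA-preferring, lowest-refcount device
 * selection loop.  Only dev_to_node(), atomic_read(), and the list
 * helpers are real kernel APIs; the rest is illustrative.
 */
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/device.h>

struct accel_dev {
	struct list_head list;		/* linkage in the global device list */
	struct device dev;		/* used only for dev_to_node() */
	atomic_t ref_count;		/* how many users already picked this device */
	bool started;			/* stand-in for adf_dev_started()/virtcrypto_dev_started() */
};

static inline bool dev_started(struct accel_dev *d)
{
	return d->started;
}

static struct accel_dev *pick_dev_on_node(struct list_head *devs, int node)
{
	struct accel_dev *best = NULL, *tmp_dev;
	unsigned long best_ctr = ~0UL;

	list_for_each_entry(tmp_dev, devs, list) {
		int dev_node = dev_to_node(&tmp_dev->dev);

		/* a negative node means the device has no NUMA affinity */
		if ((dev_node == node || dev_node < 0) && dev_started(tmp_dev)) {
			unsigned long ctr = atomic_read(&tmp_dev->ref_count);

			if (ctr < best_ctr) {
				best_ctr = ctr;
				best = tmp_dev;
			}
		}
	}
	/* callers fall back to any started device when nothing matches the node */
	return best;
}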
/drivers/md/
  md-linear.c
    209  struct dev_info *tmp_dev;  in linear_make_request() local
    217  tmp_dev = which_dev(mddev, bio_sector);  in linear_make_request()
    218  start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;  in linear_make_request()
    219  end_sector = tmp_dev->end_sector;  in linear_make_request()
    220  data_offset = tmp_dev->rdev->data_offset;  in linear_make_request()
    226  if (unlikely(is_rdev_broken(tmp_dev->rdev))) {  in linear_make_request()
    227  md_error(mddev, tmp_dev->rdev);  in linear_make_request()
    242  bio_set_dev(bio, tmp_dev->rdev->bdev);  in linear_make_request()
    263  tmp_dev->rdev->bdev,  in linear_make_request()
    264  (unsigned long long)tmp_dev->rdev->sectors,  in linear_make_request()

  raid0.c
    552  struct md_rdev *tmp_dev;  in raid0_map_submit_bio() local
    561  tmp_dev = map_sector(mddev, zone, bio_sector, &sector);  in raid0_map_submit_bio()
    564  tmp_dev = map_sector(mddev, zone, sector, &sector);  in raid0_map_submit_bio()
    572  if (unlikely(is_rdev_broken(tmp_dev))) {  in raid0_map_submit_bio()
    574  md_error(mddev, tmp_dev);  in raid0_map_submit_bio()
    578  bio_set_dev(bio, tmp_dev->bdev);  in raid0_map_submit_bio()
    580  tmp_dev->data_offset;  in raid0_map_submit_bio()
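In both md hits above, once tmp_dev names the chosen member device, the bio is redirected to that member and its starting sector is shifted before resubmission. A minimal sketch of that remap step is below; it is not md's actual code, and the helper name and parameters are hypothetical.

/*
 * Hypothetical remap step: point the bio at the member device, shift its
 * sector from the array-relative offset to the member-relative offset,
 * then resubmit it.  bio_set_dev() and submit_bio_noacct() are real
 * block-layer APIs; the rest is illustrative.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void remap_and_submit(struct bio *bio, struct block_device *member_bdev,
			     sector_t member_start, sector_t data_offset)
{
	bio_set_dev(bio, member_bdev);
	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - member_start + data_offset;
	submit_bio_noacct(bio);
}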
/drivers/firmware/arm_ffa/
  bus.c
    157  struct ffa_device *tmp_dev;  in ffa_device_is_valid() local
    161  tmp_dev = to_ffa_dev(dev);  in ffa_device_is_valid()
    162  if (tmp_dev == ffa_dev) {  in ffa_device_is_valid()
/drivers/pcmcia/
  ds.c
    217  struct device *tmp_dev;  in pcmcia_get_dev() local
    218  tmp_dev = get_device(&p_dev->dev);  in pcmcia_get_dev()
    219  if (!tmp_dev)  in pcmcia_get_dev()
    221  return to_pcmcia_dev(tmp_dev);  in pcmcia_get_dev()
    482  struct pcmcia_device *p_dev, *tmp_dev;  in pcmcia_device_add() local
    528  list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)  in pcmcia_device_add()
    529  if (p_dev->func == tmp_dev->func) {  in pcmcia_device_add()
    530  p_dev->function_config = tmp_dev->function_config;  in pcmcia_device_add()
    531  p_dev->irq = tmp_dev->irq;  in pcmcia_device_add()
/drivers/s390/block/
  dasd_devmap.c
    665  struct dasd_device *tmp_dev;  in dasd_devmap_check_copy_relation() local
    716  tmp_dev = device;  in dasd_devmap_check_copy_relation()
    718  tmp_dev = copy->entry[j].device;  in dasd_devmap_check_copy_relation()
    720  if (!tmp_dev)  in dasd_devmap_check_copy_relation()
    723  if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))  in dasd_devmap_check_copy_relation()
    727  dev_warn(&tmp_dev->cdev->dev,  in dasd_devmap_check_copy_relation()
/drivers/net/ethernet/mellanox/mlx5/core/lag/
  lag.c
    1229  struct mlx5_core_dev *tmp_dev;  in __mlx5_lag_dev_add_mdev() local
    1231  tmp_dev = mlx5_get_next_phys_dev_lag(dev);  in __mlx5_lag_dev_add_mdev()
    1232  if (tmp_dev)  in __mlx5_lag_dev_add_mdev()
    1233  ldev = mlx5_lag_dev(tmp_dev);  in __mlx5_lag_dev_add_mdev()
/drivers/iommu/intel/
  dmar.c
    107  struct device *tmp_dev;  in dmar_free_dev_scope() local
    110  for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)  in dmar_free_dev_scope()
    111  put_device(tmp_dev);  in dmar_free_dev_scope()
/drivers/net/usb/
  hso.c
    2873  struct hso_device *tmp_dev = NULL;  in hso_probe() local
    2913  tmp_dev = hso_dev;  in hso_probe()
    2934  if (tmp_dev)  in hso_probe()
    2935  hso_dev = tmp_dev;  in hso_probe()
/drivers/target/
  target_core_user.c
    3265  struct tcmu_dev *udev, *tmp_dev;  in check_timedout_devices() local
    3272  list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {  in check_timedout_devices()