/drivers/infiniband/core/

D | rdma_core.c
    110  static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)  in uverbs_try_lock_object() argument
    123  if (!exclusive)  in uverbs_try_lock_object()
    187  int id, bool exclusive)  in lookup_get_idr_uobject() argument
    215  int id, bool exclusive)  in lookup_get_fd_uobject() argument
    222  if (exclusive)  in lookup_get_fd_uobject()
    246  int id, bool exclusive)  in rdma_lookup_get_uobject() argument
    251  uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);  in rdma_lookup_get_uobject()
    260  ret = uverbs_try_lock_object(uobj, exclusive);  in rdma_lookup_get_uobject()
    269  uobj->type->type_class->lookup_put(uobj, exclusive);  in rdma_lookup_get_uobject()
    410  static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)  in assert_uverbs_usecnt() argument
    [all …]
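The pattern behind uverbs_try_lock_object() is a reader/writer count packed into a single atomic: shared lookups increment a use count, while an exclusive lookup only succeeds when the count is zero and then parks it at -1. A minimal userspace sketch of that idea using C11 atomics; the names try_lock_object/unlock_object and the usecnt layout are illustrative, not the kernel's exact API:

#include <stdatomic.h>
#include <stdbool.h>

struct uobject {
	atomic_int usecnt;	/* 0 = free, >0 = shared readers, -1 = exclusive */
};

/* Shared: bump the count unless an exclusive holder parked it at -1. */
static bool try_lock_object(struct uobject *obj, bool exclusive)
{
	int cur = atomic_load(&obj->usecnt);

	if (!exclusive) {
		do {
			if (cur < 0)
				return false;	/* exclusively held */
		} while (!atomic_compare_exchange_weak(&obj->usecnt, &cur, cur + 1));
		return true;
	}
	/* Exclusive: only succeeds when nobody holds the object at all. */
	cur = 0;
	return atomic_compare_exchange_strong(&obj->usecnt, &cur, -1);
}

static void unlock_object(struct uobject *obj, bool exclusive)
{
	if (exclusive)
		atomic_store(&obj->usecnt, 0);
	else
		atomic_fetch_sub(&obj->usecnt, 1);
}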
/drivers/mtd/ubi/

D | kapi.c
    173  if (vol->exclusive)  in ubi_open_volume()
    179  if (vol->exclusive || vol->writers > 0)  in ubi_open_volume()
    185  if (vol->exclusive || vol->writers || vol->readers ||  in ubi_open_volume()
    188  vol->exclusive = 1;  in ubi_open_volume()
    192  if (vol->metaonly || vol->exclusive)  in ubi_open_volume()
    355  vol->exclusive = 0;  in ubi_close_volume()

D | cdev.c
    63  users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;  in get_exclusive()
    70  vol->exclusive = 1;  in get_exclusive()
    90  ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);  in revoke_exclusive()
    91  vol->exclusive = 0;  in revoke_exclusive()
    99  vol->exclusive = 1;  in revoke_exclusive()

D | ubi.h
    356  int exclusive;  member
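Taken together, the kapi.c and cdev.c hits show UBI's volume open-mode accounting: readers, writers, exclusive and metaonly users are tracked in separate counters, and ubi_open_volume() rejects conflicting combinations (any existing opener blocks an exclusive open; an exclusive opener blocks everyone). A hedged sketch of that compatibility check mirroring the hit lines above; the enum and function names are simplified stand-ins:

#include <errno.h>

enum open_mode { MODE_READONLY, MODE_READWRITE, MODE_EXCLUSIVE, MODE_METAONLY };

struct volume {
	int readers, writers, exclusive, metaonly;
};

/* Returns 0 and bumps the right counter, or -EBUSY on a conflicting open. */
static int open_volume(struct volume *vol, enum open_mode mode)
{
	switch (mode) {
	case MODE_READONLY:
		if (vol->exclusive)
			return -EBUSY;
		vol->readers += 1;
		return 0;
	case MODE_READWRITE:
		if (vol->exclusive || vol->writers > 0)	/* single writer */
			return -EBUSY;
		vol->writers += 1;
		return 0;
	case MODE_EXCLUSIVE:
		if (vol->exclusive || vol->writers || vol->readers ||
		    vol->metaonly)
			return -EBUSY;
		vol->exclusive = 1;
		return 0;
	case MODE_METAONLY:
		if (vol->metaonly || vol->exclusive)
			return -EBUSY;
		vol->metaonly = 1;
		return 0;
	}
	return -EINVAL;
}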
/drivers/gpu/drm/nouveau/

D | nouveau_fence.c
    391  nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)  in nouveau_fence_sync() argument
    400  if (!exclusive) {  in nouveau_fence_sync()
    410  if (fence && (!exclusive || !fobj || !fobj->shared_count)) {  in nouveau_fence_sync()
    429  if (!exclusive || !fobj)  in nouveau_fence_sync()

D | nouveau_fence.h
    30  int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);

D | nouveau_bo.h
    87  void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);

D | nouveau_bo.c
    1553  nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)  in nouveau_bo_fence() argument
    1557  if (exclusive)  in nouveau_bo_fence()
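nouveau_fence_sync() reads the buffer's reservation object: a read (exclusive == false) only needs to wait on the exclusive fence (the last writer), while a write must also wait on every shared fence (current readers). A simplified sketch of that decision, with made-up minimal types standing in for struct reservation_object and struct dma_fence:

#include <stdbool.h>
#include <stddef.h>

struct fence { int seqno; };

struct fence_list {
	unsigned int shared_count;
	struct fence **shared;
};

struct resv {
	struct fence *excl;		/* last writer */
	struct fence_list *fobj;	/* current readers */
};

/* Stand-in for waiting on / syncing to one fence. */
static int sync_one(struct fence *f) { (void)f; return 0; }

/*
 * A writer (exclusive) must order against the previous writer and all
 * readers; a reader only needs to order against the previous writer.
 */
static int fence_sync(struct resv *resv, bool exclusive)
{
	struct fence_list *fobj = resv->fobj;
	int ret = 0;

	if (resv->excl)
		ret = sync_one(resv->excl);

	if (!exclusive || !fobj)
		return ret;	/* mirrors the "if (!exclusive || !fobj)" hits */

	for (unsigned int i = 0; !ret && i < fobj->shared_count; i++)
		ret = sync_one(fobj->shared[i]);

	return ret;
}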
/drivers/staging/lustre/include/linux/libcfs/

D | libcfs_hash.h
    275  void (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
    277  void (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
    279  void (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
    281  void (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
/drivers/clk/imx/

D | Makefile
    9  clk-gate-exclusive.o \
/drivers/staging/lustre/lnet/libcfs/

D | hash.c
    119  cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}  in cfs_hash_nl_lock() argument
    122  cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}  in cfs_hash_nl_unlock() argument
    125  cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)  in cfs_hash_spin_lock() argument
    132  cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)  in cfs_hash_spin_unlock() argument
    139  cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)  in cfs_hash_rw_lock() argument
    142  if (!exclusive)  in cfs_hash_rw_lock()
    149  cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)  in cfs_hash_rw_unlock() argument
    152  if (!exclusive)  in cfs_hash_rw_unlock()
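These libcfs_hash.h ops and their hash.c implementations let a hash table pick its locking discipline at creation time: no-op locks for unshared hashes, a spinlock (where every holder is exclusive), or an rwlock where the exclusive flag selects the write versus read side. A userspace sketch of the same dispatch using pthreads; the union and function names follow the libcfs shape but are illustrative:

#include <pthread.h>

union hash_lock {
	pthread_spinlock_t spin;
	pthread_rwlock_t rw;
};

/* No-op ops for hashes never shared between threads (cfs_hash_nl_*). */
static void nl_lock(union hash_lock *lock, int exclusive)
{
	(void)lock; (void)exclusive;
}

static void nl_unlock(union hash_lock *lock, int exclusive)
{
	(void)lock; (void)exclusive;
}

/* Spinlock ops ignore the flag: every holder is exclusive anyway. */
static void spin_lock_op(union hash_lock *lock, int exclusive)
{
	(void)exclusive;
	pthread_spin_lock(&lock->spin);
}

static void spin_unlock_op(union hash_lock *lock, int exclusive)
{
	(void)exclusive;
	pthread_spin_unlock(&lock->spin);
}

/* rwlock ops map exclusive to the write side, shared to the read side. */
static void rw_lock_op(union hash_lock *lock, int exclusive)
{
	if (!exclusive)
		pthread_rwlock_rdlock(&lock->rw);
	else
		pthread_rwlock_wrlock(&lock->rw);
}

static void rw_unlock_op(union hash_lock *lock, int exclusive)
{
	(void)exclusive;
	pthread_rwlock_unlock(&lock->rw);
}

/* The ops table a hash instance selects at creation time. */
struct hash_lock_ops {
	void (*hs_lock)(union hash_lock *lock, int exclusive);
	void (*hs_unlock)(union hash_lock *lock, int exclusive);
};

static const struct hash_lock_ops rw_ops = { rw_lock_op, rw_unlock_op };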
/drivers/gpu/drm/msm/

D | msm_drv.h
    225  struct msm_fence_context *fctx, bool exclusive);
    227  struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);

D | msm_gem.c
    614  struct msm_fence_context *fctx, bool exclusive)  in msm_gem_sync_object() argument
    632  if (!exclusive || !fobj)  in msm_gem_sync_object()
    649  struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)  in msm_gem_move_to_active() argument
    654  if (exclusive)  in msm_gem_move_to_active()
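The msm pair shows the two halves of implicit sync: msm_gem_sync_object() waits on the fences already attached to the object (the same decision as the nouveau sketch above), and msm_gem_move_to_active() publishes the new fence back, as the exclusive fence for a write or an extra shared fence for a read. A self-contained sketch of the publish half; the fixed-size array and MAX_SHARED bound are simplifications for illustration:

#include <stdbool.h>

#define MAX_SHARED 4	/* arbitrary bound for the sketch */

struct fence { int seqno; };

struct resv {
	struct fence *excl;			/* last writer */
	struct fence *shared[MAX_SHARED];	/* current readers */
	unsigned int shared_count;
};

/* Publish a fence after submitting work that touches the object. */
static void move_to_active(struct resv *resv, struct fence *fence,
			   bool exclusive)
{
	if (exclusive) {
		/* A write supersedes everything: it becomes the new
		 * exclusive fence and the old shared fences are dropped. */
		resv->excl = fence;
		resv->shared_count = 0;
	} else if (resv->shared_count < MAX_SHARED) {
		/* A read just adds another shared fence. */
		resv->shared[resv->shared_count++] = fence;
	}
}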
/drivers/gpu/drm/etnaviv/

D | etnaviv_gpu.h
    188  unsigned int context, bool exclusive, bool implicit);

D | etnaviv_gpu.c
    1092  unsigned int context, bool exclusive, bool explicit)  in etnaviv_gpu_fence_sync_obj() argument
    1099  if (!exclusive) {  in etnaviv_gpu_fence_sync_obj()
    1123  if (!exclusive || !fobj)  in etnaviv_gpu_fence_sync_obj()
/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_vm.c
    1472  struct dma_fence *exclusive,  in amdgpu_vm_bo_update_mapping() argument
    1579  r = amdgpu_sync_fence(adev, &job->sync, exclusive);  in amdgpu_vm_bo_update_mapping()
    1631  struct dma_fence *exclusive,  in amdgpu_vm_bo_split_mapping() argument
    1694  r = amdgpu_vm_bo_update_mapping(adev, exclusive,  in amdgpu_vm_bo_split_mapping()
    1733  struct dma_fence *exclusive;  in amdgpu_vm_bo_update() local
    1740  exclusive = NULL;  in amdgpu_vm_bo_update()
    1751  exclusive = reservation_object_get_excl(bo->tbo.resv);  in amdgpu_vm_bo_update()
    1765  r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,  in amdgpu_vm_bo_update()
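Here the exclusive fence is consumed rather than produced: amdgpu_vm_bo_update() snapshots the BO's exclusive fence (the last writer) via reservation_object_get_excl() and feeds it into the page-table update job's sync object, so the mapping change cannot overtake a pending write. A condensed, hedged sketch of that flow; the types and helpers are stand-ins, not amdgpu's real structures:

struct fence { int seqno; };
struct resv  { struct fence *excl; };	/* exclusive slot: the last writer */

#define MAX_DEPS 16

struct sync {
	struct fence *deps[MAX_DEPS];
	unsigned int count;
};

/* Stand-in for amdgpu_sync_fence(): record a dependency, NULL is a no-op. */
static int sync_add(struct sync *sync, struct fence *f)
{
	if (!f)
		return 0;
	if (sync->count >= MAX_DEPS)
		return -1;
	sync->deps[sync->count++] = f;
	return 0;
}

/* Condensed shape of amdgpu_vm_bo_update(): snapshot the BO's exclusive
 * fence, then make the page-table update job depend on it. */
static int vm_bo_update(struct resv *resv, struct sync *job_sync)
{
	struct fence *exclusive = NULL;

	if (resv)	/* the NULL branch mirrors "exclusive = NULL" at 1740 */
		exclusive = resv->excl;

	return sync_add(job_sync, exclusive);
}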
/drivers/isdn/i4l/

D | isdn_net.c
    2519  lp->exclusive = -1;  in _isdn_setup()
    2753  if (cfg->exclusive > 0) {  in isdn_net_setcfg()
    2762  lp->exclusive = -1;  in isdn_net_setcfg()
    2770  lp->exclusive = i;  in isdn_net_setcfg()
    2773  lp->exclusive = -1;  in isdn_net_setcfg()
    2774  if ((lp->pre_device != -1) && (cfg->exclusive == -1)) {  in isdn_net_setcfg()
    2868  cfg->exclusive = lp->exclusive;  in isdn_net_getcfg()
    3102  if (p->local->exclusive != -1)  in isdn_net_realrm()

D | isdn_ppp.c
    165  char exclusive[ISDN_MAX_CHANNELS];  /* exclusive flags */  in isdn_ppp_bind() local
    166  memset(exclusive, 0, ISDN_MAX_CHANNELS);  in isdn_ppp_bind()
    170  exclusive[lp->pppbind] = 1;  in isdn_ppp_bind()
    177  …if (ippp_table[i]->state == IPPP_OPEN && !exclusive[ippp_table[i]->minor]) { /* OPEN, but not conn…  in isdn_ppp_bind()
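In i4l, exclusive is a channel index rather than a flag: lp->exclusive == -1 means the interface has no reserved channel, while >= 0 pins it to one. isdn_ppp_bind() shows the selection side: it builds a temporary exclusive[] map of slots other interfaces have claimed via pppbind, then picks the first open slot that is not reserved. A standalone sketch of that scan; array names and bounds are illustrative:

#include <string.h>

#define MAX_CHANNELS 64

/* bound_slot[i] is the slot interface i reserved, or -1 for none;
 * slot_open[j] is nonzero when slot j is available for binding. */
static int pick_free_slot(const int bound_slot[], int n_ifaces,
			  const int slot_open[], int n_slots)
{
	char exclusive[MAX_CHANNELS];	/* 1 = reserved by some interface */
	int i;

	memset(exclusive, 0, sizeof(exclusive));

	/* Mark every slot that an interface has claimed for itself. */
	for (i = 0; i < n_ifaces; i++)
		if (bound_slot[i] >= 0 && bound_slot[i] < n_slots)
			exclusive[bound_slot[i]] = 1;

	/* First open slot not reserved by somebody else wins. */
	for (i = 0; i < n_slots; i++)
		if (slot_open[i] && !exclusive[i])
			return i;

	return -1;	/* mirrors lp->exclusive = -1: nothing bound */
}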
/drivers/target/sbp/

D | sbp_target.h
    132  int exclusive;  member

D | sbp_target.c
    125  int exclusive)  in sbp_login_count_all_by_lun() argument
    141  if (!exclusive || login->exclusive)  in sbp_login_count_all_by_lun()
    425  login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));  in sbp_management_request_login()
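sbp_login_count_all_by_lun() doubles as both admission checks: called with exclusive = 0 it counts every login on the LUN (an incoming exclusive login needs that to be zero), and with exclusive = 1 it counts only exclusive logins (an incoming shared login needs that to be zero). A hedged standalone sketch of the rule; the list layout and helper names are illustrative:

#include <stdbool.h>
#include <stddef.h>

struct login {
	int exclusive;		/* this session logged in exclusively */
	struct login *next;
};

/* Count logins on a LUN; if exclusive is set, count only exclusive ones. */
static int count_logins(const struct login *head, bool exclusive)
{
	int count = 0;

	for (const struct login *l = head; l; l = l->next)
		if (!exclusive || l->exclusive)
			count++;
	return count;
}

/* A new login is admissible when it conflicts with nobody: an exclusive
 * login wants no logins at all, a shared one wants no exclusive login. */
static bool login_allowed(const struct login *head, bool want_exclusive)
{
	if (want_exclusive)
		return count_logins(head, false) == 0;
	return count_logins(head, true) == 0;
}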
/drivers/net/ethernet/mellanox/mlx4/

D | mcg.c
    777  flags |= ctrl->exclusive ? (1 << 2) : 0;  in trans_rule_ctrl_to_hw()
    1059  .exclusive = 0,  in mlx4_tunnel_steer_add()
    1377  .exclusive = 0,  in mlx4_trans_to_dmfs_attach()
    1469  .exclusive = 0,  in mlx4_flow_steer_promisc_add()
/drivers/perf/

D | xgene_pmu.c
    457  XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
    471  XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
    472  XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
/drivers/crypto/caam/

D | qi.c
    632  opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |  in alloc_rsp_fq_cpu()
/drivers/firmware/

D | Kconfig
    20  are mutually exclusive.
/drivers/block/

D | rbd.c
    826  bool exclusive;  member
    872  rbd_opts->exclusive = true;  in parse_rbd_opts_token()
    3640  if (!rbd_dev->opts->exclusive) {  in rbd_handle_request_lock()
    4115  if (rbd_dev->opts->exclusive) {  in rbd_queue_workfn()
    5686  rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;  in rbd_add_parse_args()
    6215  if (rbd_dev->opts->exclusive) {  in do_rbd_add()
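rbd's exclusive map option threads a single bool from option parsing to runtime policy: set in parse_rbd_opts_token() (defaulting to RBD_EXCLUSIVE_DEFAULT), it makes do_rbd_add() take the exclusive lock up front and makes rbd_handle_request_lock() refuse to hand the lock to a peer. A small sketch of that shape; the names are simplified, not the driver's exact API:

#include <stdbool.h>
#include <string.h>

#define EXCLUSIVE_DEFAULT false	/* stand-in for RBD_EXCLUSIVE_DEFAULT */

struct map_opts {
	bool exclusive;		/* hold the exclusive lock for the whole map */
};

/* Token parsing in the spirit of parse_rbd_opts_token(). */
static int parse_opt(struct map_opts *opts, const char *token)
{
	if (strcmp(token, "exclusive") == 0) {
		opts->exclusive = true;
		return 0;
	}
	return -1;		/* unknown option */
}

/* With an exclusive mapping the lock is taken up front and never handed
 * over; without it, the lock migrates between clients on demand. */
static bool should_release_lock(const struct map_opts *opts)
{
	return !opts->exclusive;
}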