/drivers/infiniband/core/

restrack.c
      35  xa_init_flags(&rt[i].xa, XA_FLAGS_ALLOC);  in rdma_restrack_init()
      70  struct xarray *xa = &dev->res[i].xa;  in rdma_restrack_clean()  [local]
      72  if (!xa_empty(xa)) {  in rdma_restrack_clean()
      79  xa_for_each(xa, index, e) {  in rdma_restrack_clean()
      99  xa_destroy(xa);  in rdma_restrack_clean()
     116  XA_STATE(xas, &rt->xa, 0);  in rdma_restrack_count()
     119  xa_lock(&rt->xa);  in rdma_restrack_count()
     122  xa_unlock(&rt->xa);  in rdma_restrack_count()
     247  ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL);  in rdma_restrack_add()
     255  ret = xa_insert(&rt->xa, counter->id, res, GFP_KERNEL);  in rdma_restrack_add()
     [all …]
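
The restrack hits show the whole XArray lifecycle for an allocation-tracking table: initialise with XA_FLAGS_ALLOC, insert at a caller-chosen ID, iterate, and destroy. A minimal sketch of that pattern, with illustrative names rather than the rdma_restrack code itself:

#include <linux/xarray.h>

struct demo_table {
    struct xarray xa;
};

static void demo_table_init(struct demo_table *t)
{
    /* XA_FLAGS_ALLOC makes the array usable with the xa_alloc() family. */
    xa_init_flags(&t->xa, XA_FLAGS_ALLOC);
}

static int demo_table_add(struct demo_table *t, u32 id, void *res)
{
    /* Fails with -EBUSY if something is already stored at this ID. */
    return xa_insert(&t->xa, id, res, GFP_KERNEL);
}

static void demo_table_clean(struct demo_table *t)
{
    unsigned long index;
    void *res;

    if (!xa_empty(&t->xa))
        xa_for_each(&t->xa, index, res)
            xa_erase(&t->xa, index);

    xa_destroy(&t->xa);
}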

counters.c
     258  xa_lock(&rt->xa);  in rdma_get_counter_auto_mode()
     259  xa_for_each(&rt->xa, id, res) {  in rdma_get_counter_auto_mode()
     273  xa_unlock(&rt->xa);  in rdma_get_counter_auto_mode()
     370  xa_lock(&rt->xa);  in get_running_counters_hwstat_sum()
     371  xa_for_each(&rt->xa, id, res) {  in get_running_counters_hwstat_sum()
     375  xa_unlock(&rt->xa);  in get_running_counters_hwstat_sum()
     385  xa_lock(&rt->xa);  in get_running_counters_hwstat_sum()
     389  xa_unlock(&rt->xa);  in get_running_counters_hwstat_sum()
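
These hits all follow one pattern: take the XArray's internal spinlock with xa_lock(), walk the entries with xa_for_each(), then drop the lock, so nothing can be added or removed while the walk accumulates its result. A minimal sketch under assumed names (demo_counter is a stand-in, not a real structure):

#include <linux/xarray.h>

struct demo_counter {
    u64 value;
};

static u64 demo_sum_counters(struct xarray *xa)
{
    struct demo_counter *cnt;
    unsigned long id;
    u64 sum = 0;

    xa_lock(xa);
    xa_for_each(xa, id, cnt)
        sum += cnt->value;
    xa_unlock(xa);

    return sum;
}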

restrack.h
      19  struct xarray xa;  [member]

nldev.c
     772  xa_lock(&rt->xa);  in fill_res_srq_qps()
     773  xa_for_each(&rt->xa, id, res) {  in fill_res_srq_qps()
     799  xa_unlock(&rt->xa);  in fill_res_srq_qps()
     809  xa_unlock(&rt->xa);  in fill_res_srq_qps()
     899  xa_lock(&rt->xa);  in fill_stat_counter_qps()
     900  xa_for_each(&rt->xa, id, res) {  in fill_stat_counter_qps()
     910  xa_unlock(&rt->xa);  in fill_stat_counter_qps()
     915  xa_unlock(&rt->xa);  in fill_stat_counter_qps()
    1543  xa_lock(&rt->xa);  in res_get_common_dumpit()
    1549  xa_for_each(&rt->xa, id, res) {  in res_get_common_dumpit()
     [all …]

device.c
     154  static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,  in xan_find_marked()  [argument]
     157  XA_STATE(xas, xa, *indexp);  in xan_find_marked()
     176  #define xan_for_each_marked(xa, index, entry, filter) \  [argument]
     177  for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
     179  (index)++, entry = xan_find_marked(xa, &(index), filter))
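
xan_for_each_marked() is ib_core's own helper, built on XA_STATE for finer control over how entries are matched. For the common single-mark case the stock API is enough; a minimal sketch with illustrative names (demo_devices and DEMO_REGISTERED are assumptions):

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(demo_devices, XA_FLAGS_ALLOC);

#define DEMO_REGISTERED XA_MARK_1

static void demo_set_registered(unsigned long index)
{
    xa_set_mark(&demo_devices, index, DEMO_REGISTERED);
}

static void demo_for_each_registered(void (*fn)(unsigned long index, void *entry))
{
    unsigned long index;
    void *entry;

    /* Walks only entries that carry the DEMO_REGISTERED mark. */
    xa_for_each_marked(&demo_devices, index, entry, DEMO_REGISTERED)
        fn(index, entry);
}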

cma.c
     224  struct xarray *xa = cma_pernet_xa(net, ps);  in cma_ps_alloc()  [local]
     226  return xa_insert(xa, snum, bind_list, GFP_KERNEL);  in cma_ps_alloc()
     232  struct xarray *xa = cma_pernet_xa(net, ps);  in cma_ps_find()  [local]
     234  return xa_load(xa, snum);  in cma_ps_find()
     240  struct xarray *xa = cma_pernet_xa(net, ps);  in cma_ps_remove()  [local]
     242  xa_erase(xa, snum);  in cma_ps_remove()

/drivers/iommu/

ioasid.c
      55  struct xarray xa;  [member]
      73  .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
      82  if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {  in default_alloc()
      94  ioasid_data = xa_erase(&default_allocator.xa, ioasid);  in default_free()
     107  xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);  in ioasid_alloc_allocator()
     161  if (xa_empty(&active_allocator->xa)) {  in ioasid_register_allocator()
     230  WARN_ON(!xa_empty(&pallocator->xa));  in ioasid_unregister_allocator()
     275  ioasid_data = xa_load(&active_allocator->xa, ioasid);  in ioasid_set_data()
     333  xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {  in ioasid_alloc()
     358  ioasid_data = xa_load(&active_allocator->xa, ioasid);  in ioasid_get()
     [all …]
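
Here the XArray doubles as an ID allocator: xa_alloc() picks the lowest free index inside an XA_LIMIT() range and stores the entry there in one step, and the statically initialised array uses XARRAY_INIT() with XA_FLAGS_ALLOC. A minimal sketch of that pattern (the names and the choice of GFP_ATOMIC are illustrative; GFP_KERNEL is fine when sleeping is allowed):

#include <linux/xarray.h>

static struct xarray demo_ids = XARRAY_INIT(demo_ids, XA_FLAGS_ALLOC);

static int demo_id_alloc(u32 min, u32 max, void *data, u32 *out_id)
{
    /* Picks the lowest free index in [min, max] and stores data there. */
    return xa_alloc(&demo_ids, out_id, data, XA_LIMIT(min, max), GFP_ATOMIC);
}

static void *demo_id_free(u32 id)
{
    /* xa_erase() hands back whatever was stored under this ID. */
    return xa_erase(&demo_ids, id);
}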

/drivers/staging/unisys/visorhba/

visorhba_main.c
      85  struct xarray xa;  [member]
     194  static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,  in setup_scsitaskmgmt_handles()  [argument]
     201  ret = xa_alloc_irq(xa, &id, event, xa_limit_32b, GFP_KERNEL);  in setup_scsitaskmgmt_handles()
     205  ret = xa_alloc_irq(xa, &id, result, xa_limit_32b, GFP_KERNEL);  in setup_scsitaskmgmt_handles()
     207  xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);  in setup_scsitaskmgmt_handles()
     221  static void cleanup_scsitaskmgmt_handles(struct xarray *xa,  in cleanup_scsitaskmgmt_handles()  [argument]
     224  xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);  in cleanup_scsitaskmgmt_handles()
     225  xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);  in cleanup_scsitaskmgmt_handles()
     265  ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,  in forward_taskmgmt_command()
     297  cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);  in forward_taskmgmt_command()
     [all …]
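
The visorhba code allocates two handles per task-management command with the IRQ-safe variants and unwinds the first if the second allocation fails. A minimal sketch of that setup/teardown pair, with illustrative names:

#include <linux/xarray.h>

static int demo_setup_handles(struct xarray *xa, void *event, void *result,
                              u32 *event_id, u32 *result_id)
{
    int ret;

    ret = xa_alloc_irq(xa, event_id, event, xa_limit_32b, GFP_KERNEL);
    if (ret)
        return ret;

    ret = xa_alloc_irq(xa, result_id, result, xa_limit_32b, GFP_KERNEL);
    if (ret)
        xa_erase_irq(xa, *event_id);    /* undo the first allocation */

    return ret;
}

static void demo_cleanup_handles(struct xarray *xa, u32 event_id, u32 result_id)
{
    xa_erase_irq(xa, event_id);
    xa_erase_irq(xa, result_id);
}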

/drivers/infiniband/hw/hns/

hns_roce_srq.c
      17  xa_lock(&srq_table->xa);  in hns_roce_srq_event()
      18  srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));  in hns_roce_srq_event()
      21  xa_unlock(&srq_table->xa);  in hns_roce_srq_event()
     103  ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));  in alloc_srqc()
     135  xa_erase(&srq_table->xa, srq->srqn);  in alloc_srqc()
     154  xa_erase(&srq_table->xa, srq->srqn);  in free_srqc()
     452  xa_init(&srq_table->xa);  in hns_roce_init_srq_table()
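
alloc_srqc() stores the SRQ at an index it already owns (the SRQ number) rather than asking the array to allocate one, and folds any failure into an errno with xa_err(). A minimal sketch of that store/lookup/remove trio, with illustrative names:

#include <linux/xarray.h>

static int demo_srq_store(struct xarray *xa, unsigned long srqn, void *srq)
{
    /* xa_store() returns the old entry or an error pointer;
     * xa_err() turns that into 0 or a negative errno. */
    return xa_err(xa_store(xa, srqn, srq, GFP_KERNEL));
}

static void *demo_srq_lookup(struct xarray *xa, unsigned long srqn)
{
    void *srq;

    xa_lock(xa);
    srq = xa_load(xa, srqn);
    /* A real driver would take a reference on srq before dropping the lock. */
    xa_unlock(xa);

    return srq;
}

static void demo_srq_remove(struct xarray *xa, unsigned long srqn)
{
    xa_erase(xa, srqn);
}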

hns_roce_qp.c
     317  struct xarray *xa = &hr_dev->qp_table_xa;  in hns_roce_qp_store()  [local]
     323  ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));  in hns_roce_qp_store()
     400  struct xarray *xa = &hr_dev->qp_table_xa;  in hns_roce_qp_remove()  [local]
     412  xa_lock_irqsave(xa, flags);  in hns_roce_qp_remove()
     413  __xa_erase(xa, hr_qp->qpn);  in hns_roce_qp_remove()
     414  xa_unlock_irqrestore(xa, flags);  in hns_roce_qp_remove()
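
The QP table uses the IRQ-disabling variants because the array is also touched from interrupt context: xa_store_irq() for the insert, and an explicit xa_lock_irqsave() section around __xa_erase() when the removal has to share a critical section with other work. A minimal sketch, names illustrative:

#include <linux/xarray.h>

static int demo_qp_store(struct xarray *xa, unsigned long qpn, void *qp)
{
    return xa_err(xa_store_irq(xa, qpn, qp, GFP_KERNEL));
}

static void demo_qp_remove(struct xarray *xa, unsigned long qpn)
{
    unsigned long flags;

    xa_lock_irqsave(xa, flags);
    /* __xa_erase() expects the xa_lock to be held by the caller. */
    __xa_erase(xa, qpn);
    xa_unlock_irqrestore(xa, flags);
}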

hns_roce_device.h
     526  struct xarray xa;  [member]

/drivers/crypto/qat/qat_common/

qat_asym_algs.c
      85  dma_addr_t xa;  [member]
      89  dma_addr_t xa;  [member]
     105  char *xa;  [member]
     231  if (unlikely(!ctx->xa))  in qat_dh_compute_value()
     263  qat_req->in.dh.in.xa = ctx->dma_xa;  in qat_dh_compute_value()
     268  qat_req->in.dh.in_g2.xa = ctx->dma_xa;  in qat_dh_compute_value()
     273  qat_req->in.dh.in.xa = ctx->dma_xa;  in qat_dh_compute_value()
     432  if (ctx->xa) {  in qat_dh_clear_ctx()
     433  memset(ctx->xa, 0, ctx->p_size);  in qat_dh_clear_ctx()
     434  dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);  in qat_dh_clear_ctx()
     [all …]
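
In the QAT code xa is not an XArray at all: it is the Diffie-Hellman private value x_a, kept in a DMA-coherent buffer alongside its device address. A minimal sketch of the clear path shown above, with an illustrative context layout (not the driver's real structures):

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct demo_dh_ctx {
    struct device *dev;
    char *xa;               /* private value, CPU address */
    dma_addr_t dma_xa;      /* same buffer as seen by the device */
    unsigned int p_size;    /* prime (and buffer) size in bytes */
};

static void demo_dh_clear_ctx(struct demo_dh_ctx *ctx)
{
    if (!ctx->xa)
        return;

    /* Wipe the secret before handing the memory back. */
    memset(ctx->xa, 0, ctx->p_size);
    dma_free_coherent(ctx->dev, ctx->p_size, ctx->xa, ctx->dma_xa);
    ctx->xa = NULL;
}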

/drivers/platform/x86/intel/pmt/

class.c
     255  ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);  in intel_pmt_dev_register()
     308  xa_erase(ns->xa, entry->devid);  in intel_pmt_dev_register()
     354  xa_erase(ns->xa, entry->devid);  in intel_pmt_dev_destroy()

class.h
      40  struct xarray *xa;  [member]

telemetry.c
      78  .xa = &telem_array,

crashlog.c
     252  .xa = &crashlog_array,
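
The four pmt files above share one pattern: each sub-driver (telemetry, crashlog) owns a static allocating XArray and hands a pointer to it to the common class code through its namespace structure. A minimal sketch of how those pieces fit together; DEMO_XA_LIMIT and every name here are stand-ins, not the driver's own definitions:

#include <linux/xarray.h>

#define DEMO_XA_LIMIT   XA_LIMIT(1, 0x3fff)    /* assumed ID range */

struct demo_namespace {
    const char *name;
    struct xarray *xa;      /* device-ID space owned by the sub-driver */
};

/* In the sub-driver (telemetry/crashlog equivalent): */
static DEFINE_XARRAY_ALLOC(demo_telem_array);
static struct demo_namespace demo_telem_ns = {
    .name = "telem",
    .xa = &demo_telem_array,
};

/* In the common class code: */
static int demo_dev_register(struct demo_namespace *ns, void *entry, u32 *devid)
{
    return xa_alloc(ns->xa, devid, entry, DEMO_XA_LIMIT, GFP_KERNEL);
}

static void demo_dev_destroy(struct demo_namespace *ns, u32 devid)
{
    xa_erase(ns->xa, devid);
}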

/drivers/net/ethernet/mellanox/mlx5/core/

eswitch.h
     607  #define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \  [argument]
     608  for (index = 0, entry = xa_find(xa, &index, last, filter); \
     609  entry; entry = xa_find_after(xa, &index, last, filter))
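
The macro open-codes a bounded, filtered walk with xa_find() and xa_find_after(); the filter is either a mark such as XA_MARK_0 or XA_PRESENT to match any entry. A minimal sketch of the same loop written as a function, with illustrative names:

#include <linux/xarray.h>

static void demo_walk_range(struct xarray *xa, unsigned long last,
                            void (*fn)(unsigned long index, void *entry))
{
    unsigned long index = 0;
    void *entry;

    /* Visits every present entry with index <= last. */
    for (entry = xa_find(xa, &index, last, XA_PRESENT); entry;
         entry = xa_find_after(xa, &index, last, XA_PRESENT))
        fn(index, entry);
}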

/drivers/cdrom/

cdrom.c
     999  tracks->xa = 0;  in cdrom_count_tracks()
    1029  tracks->xa++;  in cdrom_count_tracks()
    1040  tracks->cdi, tracks->xa);  in cdrom_count_tracks()
    2530  if (!tracks.data && !tracks.cdi && !tracks.xa)  in cdrom_ioctl_disc_status()
    2538  if (tracks.xa > 0)  in cdrom_ioctl_disc_status()

/drivers/net/wireless/ath/ath9k/

ar9003_eeprom.c
    2963  static int interpolate(int x, int xa, int xb, int ya, int yb)  in interpolate()  [argument]
    2967  bf = 2 * (yb - ya) * (x - xa) / (xb - xa);  in interpolate()
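
Here xa and xb are the x-coordinates of two calibration points and the function does integer linear interpolation; the slope term is computed at twice scale so the result can be rounded rather than truncated. A sketch of the idea (the rounding step and the divide-by-zero guard are assumptions, not copied from the driver):

static int demo_interpolate(int x, int xa, int xb, int ya, int yb)
{
    int bf;

    if (xa == xb)               /* degenerate interval: avoid dividing by zero */
        return ya;

    /* Twice the interpolated offset, then round bf/2 to the nearest integer. */
    bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
    return ((bf + 1) >> 1) + ya;
}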

/drivers/scsi/elx/libefc_sli/

sli4.c
    2662  struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;  in sli_fc_cqe_parse()  [local]
    2665  *r_id = le16_to_cpu(xa->xri);  in sli_fc_cqe_parse()
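
Here xa is a view of a raw completion-queue entry: the buffer is cast to the aborted-XRI CQE layout and the little-endian exchange ID is byte-swapped for the CPU. A minimal sketch of that cast-and-convert step; the struct layout is a stand-in, only the le16_to_cpu() pattern is the point:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_xri_aborted_cqe {
    u8      code;
    u8      flags;
    __le16  xri;            /* exchange ID, little-endian on the wire */
};

static void demo_parse_xri_aborted(const u8 *cqe, u16 *r_id)
{
    const struct demo_xri_aborted_cqe *xa = (const void *)cqe;

    *r_id = le16_to_cpu(xa->xri);
}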