/drivers/gpio/ |
D | gpio-aggregator.c |
    252 struct gpio_desc **descs; member
    270 return gpiod_get_direction(fwd->descs[offset]); in gpio_fwd_get_direction()
    277 return gpiod_direction_input(fwd->descs[offset]); in gpio_fwd_direction_input()
    285 return gpiod_direction_output(fwd->descs[offset], value); in gpio_fwd_direction_output()
    292 return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset]) in gpio_fwd_get()
    293 : gpiod_get_value(fwd->descs[offset]); in gpio_fwd_get()
    299 struct gpio_desc **descs = fwd_tmp_descs(fwd); in gpio_fwd_get_multiple() local
    306 descs[j++] = fwd->descs[i]; in gpio_fwd_get_multiple()
    309 error = gpiod_get_array_value_cansleep(j, descs, NULL, values); in gpio_fwd_get_multiple()
    311 error = gpiod_get_array_value(j, descs, NULL, values); in gpio_fwd_get_multiple()
    [all …]
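The gpio_fwd_get() hits above show the usual split between the atomic and sleeping GPIO accessors. A minimal sketch of that pattern, assuming a hypothetical "my_fwd" forwarder structure rather than the aggregator's real one:

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

struct my_fwd {
	struct gpio_chip chip;
	struct gpio_desc **descs;	/* one consumer descriptor per forwarded line */
};

static int my_fwd_get(struct my_fwd *fwd, unsigned int offset)
{
	/* Pick the sleeping variant when the parent chip may sleep. */
	if (fwd->chip.can_sleep)
		return gpiod_get_value_cansleep(fwd->descs[offset]);

	return gpiod_get_value(fwd->descs[offset]);
}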
|
D | gpiolib-devres.c |
    34 struct gpio_descs **descs = res; in devm_gpiod_release_array() local
    36 gpiod_put_array(*descs); in devm_gpiod_release_array()
    216 struct gpio_descs *descs; in devm_gpiod_get_array() local
    223 descs = gpiod_get_array(dev, con_id, flags); in devm_gpiod_get_array()
    224 if (IS_ERR(descs)) { in devm_gpiod_get_array()
    226 return descs; in devm_gpiod_get_array()
    229 *dr = descs; in devm_gpiod_get_array()
    232 return descs; in devm_gpiod_get_array()
    251 struct gpio_descs *descs; in devm_gpiod_get_array_optional() local
    253 descs = devm_gpiod_get_array(dev, con_id, flags); in devm_gpiod_get_array_optional()
    [all …]
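For context, the devres wrappers listed here are what a consumer driver calls so the descriptor array is released automatically when the device is unbound, with no explicit gpiod_put_array(). A rough consumer-side sketch, assuming a made-up "led" con_id and probe helper:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int example_probe_leds(struct device *dev)
{
	struct gpio_descs *descs;
	unsigned int i;

	/* Managed lookup: freed automatically on driver unbind. */
	descs = devm_gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(descs))
		return PTR_ERR(descs);

	for (i = 0; i < descs->ndescs; i++)
		gpiod_set_value_cansleep(descs->desc[i], 1);

	return 0;
}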
|
D | gpiolib.c |
    137 return &gdev->descs[gpio - gdev->base]; in gpio_to_desc()
    168 return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)]; in gpiochip_get_desc()
    184 return desc->gdev->base + (desc - &desc->gdev->descs[0]); in desc_to_gpio()
    373 gdev->descs[i].name = gc->names[i]; in gpiochip_set_desc_names()
    447 gdev->descs[i].name = names[chip->offset + i]; in gpiochip_set_names()
    589 kfree(gdev->descs); in gpiodev_release()
    794 gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL); in gpiochip_add_data_with_key()
    795 if (!gdev->descs) { in gpiochip_add_data_with_key()
    847 gdev->descs[i].gdev = gdev; in gpiochip_add_data_with_key()
    877 struct gpio_desc *desc = &gdev->descs[i]; in gpiochip_add_data_with_key()
    [all …]
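The gpiochip_get_desc() hit above bounds-checks the hardware offset and then clamps it with array_index_nospec() before indexing the descriptor table. A small illustrative sketch of that idiom, using a stand-in "my_desc" type since gpio_desc itself is private to gpiolib:

#include <linux/nospec.h>
#include <linux/err.h>

struct my_desc { unsigned long flags; };	/* stand-in for gpiolib's private gpio_desc */

static struct my_desc *get_desc(struct my_desc *descs, unsigned int ndescs,
				unsigned int hwnum)
{
	if (hwnum >= ndescs)
		return ERR_PTR(-EINVAL);

	/* Clamp the index so a mispredicted bounds check cannot be speculated past. */
	return &descs[array_index_nospec(hwnum, ndescs)];
}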
|
D | gpio-moxtet.c |
    21 static const struct moxtet_gpio_desc descs[] = { variable
    124 if (id >= ARRAY_SIZE(descs)) { in moxtet_gpio_probe()
    136 chip->desc = &descs[id]; in moxtet_gpio_probe()
|
D | gpiolib-cdev.c |
    121 struct gpio_desc *descs[GPIOHANDLES_MAX]; member
    224 desc = lh->descs[i]; in linehandle_set_config()
    261 lh->num_descs, lh->descs, in linehandle_ioctl_unlocked()
    279 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags)) in linehandle_ioctl_unlocked()
    293 lh->descs, in linehandle_ioctl_unlocked()
    325 if (lh->descs[i]) in linehandle_free()
    326 gpiod_free(lh->descs[i]); in linehandle_free()
    398 lh->descs[i] = desc; in linehandle_create()
    1439 struct gpio_desc **descs; in linereq_get_values() local
    1451 descs = &lr->lines[i].desc; in linereq_get_values()
    [all …]
|
D | gpio-max3191x.c |
    336 struct gpio_descs *descs; in devm_gpiod_get_array_optional_count() local
    348 descs = devm_gpiod_get_array_optional(dev, con_id, flags); in devm_gpiod_get_array_optional_count()
    350 if (IS_ERR(descs)) { in devm_gpiod_get_array_optional_count()
    352 con_id, PTR_ERR(descs)); in devm_gpiod_get_array_optional_count()
    356 return descs; in devm_gpiod_get_array_optional_count()
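devm_gpiod_get_array_optional(), wrapped by the helper above, distinguishes "GPIOs simply absent" (NULL) from a real lookup error (ERR_PTR). A short sketch of that distinction, with a made-up "modesel" con_id:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int example_get_optional(struct device *dev, unsigned int *ngpios)
{
	struct gpio_descs *descs;

	descs = devm_gpiod_get_array_optional(dev, "modesel", GPIOD_IN);
	if (IS_ERR(descs))
		return PTR_ERR(descs);	/* real error */

	/* NULL just means the lines are not wired up on this board. */
	*ngpios = descs ? descs->ndescs : 0;
	return 0;
}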
|
/drivers/dma/ioat/ |
D | prep.c |
    31 static void xor_set_src(struct ioat_raw_descriptor *descs[2], in xor_set_src()
    34 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; in xor_set_src()
    39 static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) in pq_get_src()
    41 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; in pq_get_src()
    53 static void pq_set_src(struct ioat_raw_descriptor *descs[2], in pq_set_src()
    56 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; in pq_set_src()
    57 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; in pq_set_src()
    192 struct ioat_raw_descriptor *descs[2]; in __ioat_prep_xor_lock() local
    207 descs[0] = (struct ioat_raw_descriptor *) xor; in __ioat_prep_xor_lock()
    208 descs[1] = (struct ioat_raw_descriptor *) xor_ex; in __ioat_prep_xor_lock()
    [all …]
|
D | dma.c |
    350 pos = (u8 *)ioat_chan->descs[chunk].virt + offs; in ioat_alloc_ring_ent()
    351 phys = ioat_chan->descs[chunk].hw + offs; in ioat_alloc_ring_ent()
    389 struct ioat_descs *descs = &ioat_chan->descs[i]; in ioat_alloc_ring() local
    391 descs->virt = dma_alloc_coherent(to_dev(ioat_chan), in ioat_alloc_ring()
    392 IOAT_CHUNK_SIZE, &descs->hw, flags); in ioat_alloc_ring()
    393 if (!descs->virt) { in ioat_alloc_ring()
    397 descs = &ioat_chan->descs[idx]; in ioat_alloc_ring()
    400 descs->virt, descs->hw); in ioat_alloc_ring()
    401 descs->virt = NULL; in ioat_alloc_ring()
    402 descs->hw = 0; in ioat_alloc_ring()
    [all …]
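ioat_alloc_ring() above builds the descriptor ring from fixed-size coherent chunks and unwinds on failure. A rough sketch of that pattern under assumed names (CHUNK_SIZE, struct my_chunk), not the ioat definitions:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/errno.h>

#define CHUNK_SIZE	SZ_2M	/* arbitrary chunk size for the sketch */

struct my_chunk {
	void *virt;
	dma_addr_t hw;
};

static int alloc_desc_chunks(struct device *dev, struct my_chunk *chunks,
			     int nchunks)
{
	int i;

	for (i = 0; i < nchunks; i++) {
		chunks[i].virt = dma_alloc_coherent(dev, CHUNK_SIZE,
						    &chunks[i].hw, GFP_NOWAIT);
		if (!chunks[i].virt)
			goto err;
	}
	return 0;

err:
	/* Free already-allocated chunks in reverse order. */
	while (--i >= 0) {
		dma_free_coherent(dev, CHUNK_SIZE, chunks[i].virt, chunks[i].hw);
		chunks[i].virt = NULL;
		chunks[i].hw = 0;
	}
	return -ENOMEM;
}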
|
/drivers/infiniband/hw/mlx5/ |
D | counters.c |
    242 stats = rdma_alloc_hw_stats_struct(cnts->descs, in do_alloc_stats()
    440 type = *(u32 *)cnts->descs[index].priv; in do_get_op_stat()
    580 struct rdma_stat_desc *descs, size_t *offsets, in mlx5_ib_fill_counters() argument
    592 descs[j].name = names[i].name; in mlx5_ib_fill_counters()
    601 descs[j].name = names[i].name; in mlx5_ib_fill_counters()
    611 descs[j].name = names[i].name; in mlx5_ib_fill_counters()
    621 descs[j].name = names[i].name; in mlx5_ib_fill_counters()
    631 descs[j].name = names[i].name; in mlx5_ib_fill_counters()
    641 descs[j].name = cong_cnts[i].name; in mlx5_ib_fill_counters()
    648 descs[j].name = ext_ppcnt_cnts[i].name; in mlx5_ib_fill_counters()
    [all …]
|
/drivers/comedi/drivers/ |
D | mite.c |
    670 desc = &ring->descs[i]; in mite_init_ring_descriptors()
    679 desc = &ring->descs[i]; in mite_init_ring_descriptors()
    699 struct mite_dma_desc *descs = ring->descs; in mite_free_dma_descs() local
    701 if (descs) { in mite_free_dma_descs()
    703 ring->n_links * sizeof(*descs), in mite_free_dma_descs()
    704 descs, ring->dma_addr); in mite_free_dma_descs()
    705 ring->descs = NULL; in mite_free_dma_descs()
    719 struct mite_dma_desc *descs; in mite_buf_change() local
    729 descs = dma_alloc_coherent(ring->hw_dev, in mite_buf_change()
    730 n_links * sizeof(*descs), in mite_buf_change()
    [all …]
|
/drivers/net/ethernet/cirrus/ |
D | ep93xx_eth.c |
    157 struct ep93xx_descs *descs; member
    240 rstat = ep->descs->rstat + entry; in ep93xx_rx()
    282 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; in ep93xx_rx()
    345 txd = &ep->descs->tdesc[entry]; in ep93xx_xmit()
    380 tstat = ep->descs->tstat + entry; in ep93xx_tx_complete()
    394 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; in ep93xx_tx_complete()
    449 if (!ep->descs) in ep93xx_free_buffers()
    455 d = ep->descs->rdesc[i].buf_addr; in ep93xx_free_buffers()
    465 d = ep->descs->tdesc[i].buf_addr; in ep93xx_free_buffers()
    472 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, in ep93xx_free_buffers()
    [all …]
|
/drivers/rpmsg/ |
D | qcom_glink_smem.c |
    229 __le32 *descs; in qcom_glink_smem_register() local
    273 descs = qcom_smem_get(remote_pid, in qcom_glink_smem_register()
    275 if (IS_ERR(descs)) { in qcom_glink_smem_register()
    277 ret = PTR_ERR(descs); in qcom_glink_smem_register()
    287 tx_pipe->tail = &descs[0]; in qcom_glink_smem_register()
    288 tx_pipe->head = &descs[1]; in qcom_glink_smem_register()
    289 rx_pipe->tail = &descs[2]; in qcom_glink_smem_register()
    290 rx_pipe->head = &descs[3]; in qcom_glink_smem_register()
|
/drivers/clk/mvebu/ |
D | clk-corediv.c |
    41 const struct clk_corediv_desc *descs; member
    195 .descs = mvebu_corediv_desc,
    211 .descs = mvebu_corediv_desc,
    227 .descs = mvebu_corediv_desc,
    239 .descs = mv98dx3236_corediv_desc,
    293 corediv[i].desc = soc_desc->descs + i; in mvebu_corediv_clk_init()
|
/drivers/gpu/drm/gud/ |
D | gud_connector.c |
    702 struct gud_connector_descriptor_req *descs; in gud_get_connectors() local
    706 descs = kmalloc_array(GUD_CONNECTORS_MAX_NUM, sizeof(*descs), GFP_KERNEL); in gud_get_connectors()
    707 if (!descs) in gud_get_connectors()
    711 descs, GUD_CONNECTORS_MAX_NUM * sizeof(*descs)); in gud_get_connectors()
    714 if (!ret || ret % sizeof(*descs)) { in gud_get_connectors()
    719 num_connectors = ret / sizeof(*descs); in gud_get_connectors()
    722 ret = gud_connector_create(gdrm, i, &descs[i]); in gud_get_connectors()
    727 kfree(descs); in gud_get_connectors()
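gud_get_connectors() only trusts the transfer if it returned a non-zero whole number of descriptors (the ret % sizeof(*descs) check above). A generic sketch of that validation, with a hypothetical descriptor type and transport call:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

#define MAX_DESCS	32	/* arbitrary upper bound for the sketch */

struct my_desc { u8 bytes[8]; };		/* hypothetical wire-format descriptor */

int device_read_descs(void *buf, size_t len);	/* hypothetical transport helper */

static int get_descs(unsigned int *num_out, struct my_desc **out)
{
	struct my_desc *descs;
	int ret;

	descs = kmalloc_array(MAX_DESCS, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	ret = device_read_descs(descs, MAX_DESCS * sizeof(*descs));
	/* Reject errors, empty replies, and partial descriptors. */
	if (ret <= 0 || ret % sizeof(*descs)) {
		kfree(descs);
		return ret < 0 ? ret : -EIO;
	}

	*num_out = ret / sizeof(*descs);
	*out = descs;
	return 0;
}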
|
/drivers/regulator/ |
D | ltc3589.c |
    384 struct regulator_desc *descs; in ltc3589_probe() local
    399 descs = ltc3589->regulator_descs; in ltc3589_probe()
    400 memcpy(descs, ltc3589_regulators, sizeof(ltc3589_regulators)); in ltc3589_probe()
    402 descs[LTC3589_LDO3].fixed_uV = 1800000; in ltc3589_probe()
    403 descs[LTC3589_LDO4].volt_table = ltc3589_ldo4; in ltc3589_probe()
    405 descs[LTC3589_LDO3].fixed_uV = 2800000; in ltc3589_probe()
    406 descs[LTC3589_LDO4].volt_table = ltc3589_12_ldo4; in ltc3589_probe()
|
D | ltc3676.c |
    302 struct regulator_desc *descs; in ltc3676_regulator_probe() local
    313 descs = ltc3676->regulator_descs; in ltc3676_regulator_probe()
    314 memcpy(descs, ltc3676_regulators, sizeof(ltc3676_regulators)); in ltc3676_regulator_probe()
    315 descs[LTC3676_LDO3].fixed_uV = 1800000; /* LDO3 is fixed 1.8V */ in ltc3676_regulator_probe()
|
/drivers/net/ethernet/faraday/ |
D | ftmac100.c |
    56 struct ftmac100_descs *descs; member
    336 return &priv->descs->rxdes[priv->rx_pointer]; in ftmac100_current_rxdes()
    602 return &priv->descs->txdes[priv->tx_pointer]; in ftmac100_current_txdes()
    607 return &priv->descs->txdes[priv->tx_clean_pointer]; in ftmac100_current_clean_txdes()
    730 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; in ftmac100_free_buffers()
    742 struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; in ftmac100_free_buffers()
    754 priv->descs, priv->descs_dma_addr); in ftmac100_free_buffers()
    761 priv->descs = dma_alloc_coherent(priv->dev, in ftmac100_alloc_buffers()
    764 if (!priv->descs) in ftmac100_alloc_buffers()
    768 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); in ftmac100_alloc_buffers()
    [all …]
|
/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
    1009 static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, in ice_xmit_pkt_batch() argument
    1019 dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr); in ice_xmit_pkt_batch()
    1020 xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len); in ice_xmit_pkt_batch()
    1024 tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]), in ice_xmit_pkt_batch()
    1025 0, descs[i].len, 0); in ice_xmit_pkt_batch()
    1027 *total_bytes += descs[i].len; in ice_xmit_pkt_batch()
    1040 static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, in ice_fill_tx_hw_ring() argument
    1048 ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes); in ice_fill_tx_hw_ring()
    1050 ice_xmit_pkt(xdp_ring, &descs[i], total_bytes); in ice_fill_tx_hw_ring()
    1061 struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; in ice_xmit_zc() local
    [all …]
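ice_xmit_pkt_batch() above converts each AF_XDP descriptor's frame address into a DMA address and syncs it for the device before writing the hardware Tx descriptor. A stripped-down sketch of that loop; fill_hw_desc() is a hypothetical stand-in for the NIC-specific descriptor write:

#include <net/xsk_buff_pool.h>
#include <linux/if_xdp.h>
#include <linux/types.h>

void fill_hw_desc(dma_addr_t dma, u32 len);	/* hypothetical NIC-specific hook */

static void xmit_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
		       u32 nb_pkts, unsigned int *total_bytes)
{
	u32 i;

	for (i = 0; i < nb_pkts; i++) {
		/* Translate the UMEM address and make the frame visible to the device. */
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, descs[i].addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, descs[i].len);
		fill_hw_desc(dma, descs[i].len);
		*total_bytes += descs[i].len;
	}
}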
|
/drivers/media/pci/pt3/ |
D | pt3_dma.c |
    140 adap->desc_buf[i].descs, adap->desc_buf[i].b_addr); in pt3_free_dmabuf()
    179 adap->desc_buf[i].descs = p; in pt3_alloc_dmabuf()
    183 d = &adap->desc_buf[i - 1].descs[DESCS_IN_PAGE - 1]; in pt3_alloc_dmabuf()
    189 d = &adap->desc_buf[i].descs[j]; in pt3_alloc_dmabuf()
|
/drivers/scsi/snic/ |
D | vnic_cq_fw.h |
    21 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
    36 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
|
D | vnic_dev.c |
    206 memset(ring->descs, 0, ring->size); in svnic_dev_clear_desc_ring()
    226 ring->descs = (u8 *)ring->descs_unaligned + in svnic_dev_alloc_desc_ring()
    238 if (ring->descs) { in svnic_dev_free_desc_ring()
    243 ring->descs = NULL; in svnic_dev_free_desc_ring()
    391 dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs; in svnic_dev_init_devcmd2()
    392 dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; in svnic_dev_init_devcmd2()
|
/drivers/scsi/fnic/ |
D | vnic_cq_copy.h |
    23 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
    38 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
|
D | vnic_wq_copy.h |
    36 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc()
    76 struct fcpio_host_req *wq_desc = wq->ring.descs; in vnic_wq_copy_service()
|
/drivers/net/ethernet/actions/ |
D | owl-emac.c |
    200 desc = &ring->descs[i]; in owl_emac_ring_prepare_rx()
    225 desc = &ring->descs[i]; in owl_emac_ring_prepare_tx()
    247 ring->descs[i].status = 0; in owl_emac_ring_unprepare_rx()
    266 ring->descs[i].status = 0; in owl_emac_ring_unprepare_tx()
    282 ring->descs = dmam_alloc_coherent(dev, in owl_emac_ring_alloc()
    285 if (!ring->descs) in owl_emac_ring_alloc()
    516 desc = &ring->descs[tx_head]; in owl_emac_setup_frame_xmit()
    587 desc = &ring->descs[tx_head]; in owl_emac_ndo_start_xmit()
    641 desc = &ring->descs[tx_tail]; in owl_emac_tx_complete_tail()
    725 status = READ_ONCE(ring->descs[tx_next].status); in owl_emac_tx_complete()
    [all …]
|
/drivers/vhost/ |
D | vringh.c |
    189 struct vring_desc **descs, int *desc_max) in move_to_indirect() argument
    210 *descs = addr; in move_to_indirect()
    246 struct vring_desc **descs, int *desc_max) in return_from_indirect() argument
    251 *descs = vrh->vring.desc; in return_from_indirect()
    306 struct vring_desc desc, *descs; in __vringh_iov() local
    311 descs = vrh->vring.desc; in __vringh_iov()
    330 err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, in __vringh_iov()
    333 err = copy(vrh, &desc, &descs[i], sizeof(desc)); in __vringh_iov()
    356 &descs, &desc_max); in __vringh_iov()
    368 vringh_bad("Descriptor loop in %p", descs); in __vringh_iov()
    [all …]
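__vringh_iov() walks a virtio descriptor chain, guarding against loops and optionally descending into indirect tables. A heavily simplified sketch of the chain walk only; endianness conversion, range checking, and the move_to_indirect()/return_from_indirect() steps of the real code are omitted:

#include <linux/virtio_ring.h>
#include <linux/errno.h>
#include <linux/types.h>

static int walk_chain(const struct vring_desc *descs, unsigned int desc_max,
		      unsigned int head,
		      void (*use)(u64 addr, u32 len, bool write))
{
	unsigned int i = head, count = 0;

	for (;;) {
		const struct vring_desc *d = &descs[i];

		use(d->addr, d->len, d->flags & VRING_DESC_F_WRITE);

		if (!(d->flags & VRING_DESC_F_NEXT))
			return 0;

		i = d->next;
		/* Basic sanity and loop protection, as in the real walker. */
		if (i >= desc_max || ++count > desc_max)
			return -ELOOP;
	}
}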
|