/drivers/gpu/drm/i915/

intel_ringbuffer.c
    37  intel_ring_initialized(struct intel_engine_cs *ring)  in intel_ring_initialized() argument
    39      struct drm_device *dev = ring->dev;  in intel_ring_initialized()
    45      struct intel_context *dctx = ring->default_context;  in intel_ring_initialized()
    46      struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;  in intel_ring_initialized()
    50      return ring->buffer && ring->buffer->obj;  in intel_ring_initialized()
    67  bool intel_ring_stopped(struct intel_engine_cs *ring)  in intel_ring_stopped() argument
    69      struct drm_i915_private *dev_priv = ring->dev->dev_private;  in intel_ring_stopped()
    70      return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);  in intel_ring_stopped()
    73  void __intel_ring_advance(struct intel_engine_cs *ring)  in __intel_ring_advance() argument
    75      struct intel_ringbuffer *ringbuf = ring->buffer;  in __intel_ring_advance()
    [all …]
intel_lrc.c
    274  static void execlists_elsp_write(struct intel_engine_cs *ring,  in execlists_elsp_write() argument
    278      struct drm_i915_private *dev_priv = ring->dev->dev_private;  in execlists_elsp_write()
    317      I915_WRITE(RING_ELSP(ring), desc[1]);  in execlists_elsp_write()
    318      I915_WRITE(RING_ELSP(ring), desc[0]);  in execlists_elsp_write()
    319      I915_WRITE(RING_ELSP(ring), desc[3]);  in execlists_elsp_write()
    321      I915_WRITE(RING_ELSP(ring), desc[2]);  in execlists_elsp_write()
    324      POSTING_READ(RING_EXECLIST_STATUS(ring));  in execlists_elsp_write()
    359  static int execlists_submit_context(struct intel_engine_cs *ring,  in execlists_submit_context() argument
    366      ctx_obj0 = to0->engine[ring->id].state;  in execlists_submit_context()
    373      ctx_obj1 = to1->engine[ring->id].state;  in execlists_submit_context()
    [all …]
intel_ringbuffer.h
    32  #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))  argument
    33  #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)  argument
    35  #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))  argument
    36  #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)  argument
    38  #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))  argument
    39  #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)  argument
    41  #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))  argument
    42  #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)  argument
    44  #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))  argument
    45  #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)  argument
    [all …]
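The macros above derive every per-engine ring register accessor from a single `mmio_base` field. A minimal user-space sketch of the same pattern follows; the register map, offsets, and the `reg_read`/`reg_write` helpers are illustrative stand-ins, not i915's actual MMIO plumbing:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the device's MMIO window; in the kernel these are real
     * hardware registers reached through I915_READ()/I915_WRITE(). */
    static uint32_t mmio[0x100];

    #define RING_TAIL(base) ((base) + 0x30)   /* offsets are made up */
    #define RING_HEAD(base) ((base) + 0x34)

    static uint32_t reg_read(uint32_t reg)              { return mmio[reg]; }
    static void     reg_write(uint32_t reg, uint32_t v) { mmio[reg] = v; }

    struct engine { uint32_t mmio_base; };

    /* Same shape as the i915 macros: each accessor takes only the engine,
     * and the register address falls out of its mmio_base. */
    #define READ_TAIL(ring)       reg_read(RING_TAIL((ring)->mmio_base))
    #define WRITE_TAIL(ring, val) reg_write(RING_TAIL((ring)->mmio_base), (val))

    int main(void)
    {
        struct engine rcs = { .mmio_base = 0x00 }, bcs = { .mmio_base = 0x40 };

        WRITE_TAIL(&rcs, 0x80);
        WRITE_TAIL(&bcs, 0xc0);
        printf("rcs tail=0x%x bcs tail=0x%x\n",
               (unsigned)READ_TAIL(&rcs), (unsigned)READ_TAIL(&bcs));
        return 0;
    }

The payoff of the pattern is that one macro family serves every engine; adding an engine means adding an `mmio_base`, not another set of accessors.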
i915_gem_context.c
    299     struct intel_engine_cs *ring = &dev_priv->ring[i];  in i915_gem_context_reset() local
    300     struct intel_context *lctx = ring->last_context;  in i915_gem_context_reset()
    307     ring->last_context = NULL;  in i915_gem_context_reset()
    320     if (WARN_ON(dev_priv->ring[RCS].default_context))  in i915_gem_context_init()
    344     struct intel_engine_cs *ring = &dev_priv->ring[i];  in i915_gem_context_init() local
    347     ring->default_context = ctx;  in i915_gem_context_init()
    359     struct intel_context *dctx = dev_priv->ring[RCS].default_context;  in i915_gem_context_fini()
    374     WARN_ON(!dev_priv->ring[RCS].last_context);  in i915_gem_context_fini()
    375     if (dev_priv->ring[RCS].last_context == dctx) {  in i915_gem_context_fini()
    380     dev_priv->ring[RCS].last_context = NULL;  in i915_gem_context_fini()
    [all …]
i915_gpu_error.c
    38  static const char *ring_str(int ring)  in ring_str() argument
    40      switch (ring) {  in ring_str()
    209     err_puts(m, err->ring != -1 ? " " : "");  in print_error_buffers()
    210     err_puts(m, ring_str(err->ring));  in print_error_buffers()
    245         struct drm_i915_error_ring *ring)  in i915_ring_error_state() argument
    247     if (!ring->valid)  in i915_ring_error_state()
    250     err_printf(m, " HEAD: 0x%08x\n", ring->head);  in i915_ring_error_state()
    251     err_printf(m, " TAIL: 0x%08x\n", ring->tail);  in i915_ring_error_state()
    252     err_printf(m, " CTL: 0x%08x\n", ring->ctl);  in i915_ring_error_state()
    253     err_printf(m, " HWS: 0x%08x\n", ring->hws);  in i915_ring_error_state()
    [all …]
/drivers/net/wireless/b43legacy/

dma.c
    45  struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,  in op32_idx2desc() argument
    51      *meta = &(ring->meta[slot]);  in op32_idx2desc()
    52      desc = ring->descbase;  in op32_idx2desc()
    58  static void op32_fill_descriptor(struct b43legacy_dmaring *ring,  in op32_fill_descriptor() argument
    63      struct b43legacy_dmadesc32 *descbase = ring->descbase;  in op32_fill_descriptor()
    70      B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));  in op32_fill_descriptor()
    75      addr |= ring->dev->dma.translation;  in op32_fill_descriptor()
    76      ctl = (bufsize - ring->frameoffset)  in op32_fill_descriptor()
    78      if (slot == ring->nr_slots - 1)  in op32_fill_descriptor()
    93  static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)  in op32_poke_tx() argument
    [all …]
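The visible fragment of op32_fill_descriptor() validates the slot index and special-cases the ring's last descriptor so the DMA engine wraps back to slot 0. A hedged sketch of that shape; the struct layout and the DESC_CTL_END bit value are invented here (the driver uses its own control-word flags, e.g. a "descriptor table end" bit):

    #include <assert.h>
    #include <stdint.h>

    #define DESC_CTL_END (1u << 30)   /* illustrative end-of-table flag */

    struct desc    { uint32_t control; uint32_t address; };
    struct dmaring { struct desc *descbase; int nr_slots; };

    /* Mirror of the b43legacy pattern: check the slot is in range, then
     * set the end-of-table bit only on the last descriptor so hardware
     * wraps to slot 0 after consuming it. */
    static void fill_descriptor(struct dmaring *ring, int slot,
                                uint32_t addr, uint32_t ctl)
    {
        assert(slot >= 0 && slot < ring->nr_slots);
        if (slot == ring->nr_slots - 1)
            ctl |= DESC_CTL_END;
        ring->descbase[slot].address = addr;
        ring->descbase[slot].control = ctl;
    }

    int main(void)
    {
        struct desc descs[4] = { 0 };
        struct dmaring ring = { descs, 4 };

        fill_descriptor(&ring, 3, 0x1000, 0);   /* last slot gets the flag */
        return (descs[3].control & DESC_CTL_END) ? 0 : 1;
    }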
/drivers/thunderbolt/

nhi.c
    22  #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")  argument
    25  static int ring_interrupt_index(struct tb_ring *ring)  in ring_interrupt_index() argument
    27      int bit = ring->hop;  in ring_interrupt_index()
    28      if (!ring->is_tx)  in ring_interrupt_index()
    29          bit += ring->nhi->hop_count;  in ring_interrupt_index()
    38  static void ring_interrupt_active(struct tb_ring *ring, bool active)  in ring_interrupt_active() argument
    40      int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;  in ring_interrupt_active()
    41      int bit = ring_interrupt_index(ring) & 31;  in ring_interrupt_active()
    44      old = ioread32(ring->nhi->iobase + reg);  in ring_interrupt_active()
    50      dev_info(&ring->nhi->pdev->dev,  in ring_interrupt_active()
    [all …]
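ring_interrupt_index() packs all ring interrupts into one flat bit space: TX ring N uses bit N, RX ring N uses bit hop_count + N, and the caller then splits the index into a 32-bit register number and a bit within it. A small self-contained sketch of the same arithmetic (struct names are stand-ins for the driver's tb_ring/tb_nhi):

    #include <stdbool.h>
    #include <stdio.h>

    struct nhi  { int hop_count; };
    struct ring { struct nhi *nhi; int hop; bool is_tx; };

    /* TX ring N -> bit N; RX ring N -> bit hop_count + N. */
    static int ring_interrupt_index(const struct ring *r)
    {
        int bit = r->hop;
        if (!r->is_tx)
            bit += r->nhi->hop_count;
        return bit;
    }

    int main(void)
    {
        struct nhi  nhi = { .hop_count = 12 };
        struct ring rx3 = { .nhi = &nhi, .hop = 3, .is_tx = false };
        int idx = ring_interrupt_index(&rx3);

        /* The interrupt mask registers are 32 bits wide, so the flat index
         * is split exactly as nhi.c does with "/ 32" and "& 31". */
        printf("reg=%d bit=%d\n", idx / 32, idx & 31);   /* reg=0 bit=15 */
        return 0;
    }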
/drivers/gpu/drm/radeon/

radeon_ring.c
    45  static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
    58      struct radeon_ring *ring)  in radeon_ring_supports_scratch_reg() argument
    60      switch (ring->idx) {  in radeon_ring_supports_scratch_reg()
    78  void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)  in radeon_ring_free_size() argument
    80      uint32_t rptr = radeon_ring_get_rptr(rdev, ring);  in radeon_ring_free_size()
    83      ring->ring_free_dw = rptr + (ring->ring_size / 4);  in radeon_ring_free_size()
    84      ring->ring_free_dw -= ring->wptr;  in radeon_ring_free_size()
    85      ring->ring_free_dw &= ring->ptr_mask;  in radeon_ring_free_size()
    86      if (!ring->ring_free_dw) {  in radeon_ring_free_size()
    88          ring->ring_free_dw = ring->ring_size / 4;  in radeon_ring_free_size()
    [all …]
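radeon_ring_free_size() is the classic ring-buffer free-space computation: the distance from the write pointer forward to the read pointer, taken modulo the ring size via ptr_mask (which requires the size in dwords to be a power of two). A runnable sketch of just that math, with field names trimmed down from the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct ring { uint32_t ring_size; uint32_t wptr, ptr_mask, free_dw; };

    /* free = (rptr + size_in_dw - wptr) mod size_in_dw. A result of 0 is
     * ambiguous (completely full vs. completely empty); like the radeon
     * code, treat rptr == wptr as an empty ring. */
    static void ring_free_size(struct ring *ring, uint32_t rptr)
    {
        ring->free_dw  = rptr + ring->ring_size / 4;
        ring->free_dw -= ring->wptr;
        ring->free_dw &= ring->ptr_mask;     /* ptr_mask = size_in_dw - 1 */
        if (!ring->free_dw)
            ring->free_dw = ring->ring_size / 4;
    }

    int main(void)
    {
        struct ring r = { .ring_size = 4096, .wptr = 1000, .ptr_mask = 1023 };

        ring_free_size(&r, 24);    /* rptr behind wptr: the wrapped case */
        printf("free dwords = %u\n", (unsigned)r.free_dw);   /* 48 */
        return 0;
    }

The mask trick is why these rings are sized as powers of two: `& ptr_mask` replaces a division and handles pointer wrap-around for free.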
radeon_fence.c
    62  static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)  in radeon_fence_write() argument
    64      struct radeon_fence_driver *drv = &rdev->fence_drv[ring];  in radeon_fence_write()
    83  static u32 radeon_fence_read(struct radeon_device *rdev, int ring)  in radeon_fence_read() argument
    85      struct radeon_fence_driver *drv = &rdev->fence_drv[ring];  in radeon_fence_read()
    108 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)  in radeon_fence_schedule_check() argument
    115     &rdev->fence_drv[ring].lockup_work,  in radeon_fence_schedule_check()
    131     int ring)  in radeon_fence_emit() argument
    133     u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];  in radeon_fence_emit()
    142     (*fence)->ring = ring;  in radeon_fence_emit()
    144     &rdev->fence_queue.lock, rdev->fence_context + ring, seq);  in radeon_fence_emit()
    [all …]
r600_dma.c
    52      struct radeon_ring *ring)  in r600_dma_get_rptr() argument
    57      rptr = rdev->wb.wb[ring->rptr_offs/4];  in r600_dma_get_rptr()
    73      struct radeon_ring *ring)  in r600_dma_get_wptr() argument
    87      struct radeon_ring *ring)  in r600_dma_set_wptr() argument
    89      WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);  in r600_dma_set_wptr()
    109     rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;  in r600_dma_stop()
    122     struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];  in r600_dma_resume() local
    131     rb_bufsz = order_base_2(ring->ring_size / 4);  in r600_dma_resume()
    151     WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);  in r600_dma_resume()
    167     ring->wptr = 0;  in r600_dma_resume()
    [all …]
uvd_v1_0.c
    40      struct radeon_ring *ring)  in uvd_v1_0_get_rptr() argument
    54      struct radeon_ring *ring)  in uvd_v1_0_get_wptr() argument
    68      struct radeon_ring *ring)  in uvd_v1_0_set_wptr() argument
    70      WREG32(UVD_RBC_RB_WPTR, ring->wptr);  in uvd_v1_0_set_wptr()
    84      struct radeon_ring *ring = &rdev->ring[fence->ring];  in uvd_v1_0_fence_emit() local
    85      uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;  in uvd_v1_0_fence_emit()
    87      radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));  in uvd_v1_0_fence_emit()
    88      radeon_ring_write(ring, addr & 0xffffffff);  in uvd_v1_0_fence_emit()
    89      radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));  in uvd_v1_0_fence_emit()
    90      radeon_ring_write(ring, fence->seq);  in uvd_v1_0_fence_emit()
    [all …]
cik_sdma.c
    64      struct radeon_ring *ring)  in cik_sdma_get_rptr() argument
    69      rptr = rdev->wb.wb[ring->rptr_offs/4];  in cik_sdma_get_rptr()
    71      if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_get_rptr()
    91      struct radeon_ring *ring)  in cik_sdma_get_wptr() argument
    95      if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_get_wptr()
    112     struct radeon_ring *ring)  in cik_sdma_set_wptr() argument
    116     if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cik_sdma_set_wptr()
    121     WREG32(reg, (ring->wptr << 2) & 0x3fffc);  in cik_sdma_set_wptr()
    136     struct radeon_ring *ring = &rdev->ring[ib->ring];  in cik_sdma_ring_ib_execute() local
    140     u32 next_rptr = ring->wptr + 5;  in cik_sdma_ring_ib_execute()
    [all …]
evergreen_dma.c
    44      struct radeon_ring *ring = &rdev->ring[fence->ring];  in evergreen_dma_fence_ring_emit() local
    45      u64 addr = rdev->fence_drv[fence->ring].gpu_addr;  in evergreen_dma_fence_ring_emit()
    47      radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));  in evergreen_dma_fence_ring_emit()
    48      radeon_ring_write(ring, addr & 0xfffffffc);  in evergreen_dma_fence_ring_emit()
    49      radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));  in evergreen_dma_fence_ring_emit()
    50      radeon_ring_write(ring, fence->seq);  in evergreen_dma_fence_ring_emit()
    52      radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));  in evergreen_dma_fence_ring_emit()
    54      radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));  in evergreen_dma_fence_ring_emit()
    55      radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));  in evergreen_dma_fence_ring_emit()
    56      radeon_ring_write(ring, 1);  in evergreen_dma_fence_ring_emit()
    [all …]
vce_v1_0.c
    43      struct radeon_ring *ring)  in vce_v1_0_get_rptr() argument
    45      if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_get_rptr()
    60      struct radeon_ring *ring)  in vce_v1_0_get_wptr() argument
    62      if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_get_wptr()
    77      struct radeon_ring *ring)  in vce_v1_0_set_wptr() argument
    79      if (ring->idx == TN_RING_TYPE_VCE1_INDEX)  in vce_v1_0_set_wptr()
    80          WREG32(VCE_RB_WPTR, ring->wptr);  in vce_v1_0_set_wptr()
    82          WREG32(VCE_RB_WPTR2, ring->wptr);  in vce_v1_0_set_wptr()
    94      struct radeon_ring *ring;  in vce_v1_0_start() local
    100     ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];  in vce_v1_0_start()
    [all …]
ni_dma.c
    54      struct radeon_ring *ring)  in cayman_dma_get_rptr() argument
    59      rptr = rdev->wb.wb[ring->rptr_offs/4];  in cayman_dma_get_rptr()
    61      if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_get_rptr()
    81      struct radeon_ring *ring)  in cayman_dma_get_wptr() argument
    85      if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_get_wptr()
    102     struct radeon_ring *ring)  in cayman_dma_set_wptr() argument
    106     if (ring->idx == R600_RING_TYPE_DMA_INDEX)  in cayman_dma_set_wptr()
    111     WREG32(reg, (ring->wptr << 2) & 0x3fffc);  in cayman_dma_set_wptr()
    125     struct radeon_ring *ring = &rdev->ring[ib->ring];  in cayman_dma_ring_ib_execute() local
    128     u32 next_rptr = ring->wptr + 4;  in cayman_dma_ring_ib_execute()
    [all …]
radeon_trace.h
    34      __field(u32, ring)
    40      __entry->ring = p->ring;
    43      p->rdev, p->ring);
    46      __entry->ring, __entry->dw,
    51      TP_PROTO(unsigned vmid, int ring),
    52      TP_ARGS(vmid, ring),
    55      __field(u32, ring)
    60      __entry->ring = ring;
    62      TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
    108     TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
    [all …]
uvd_v2_2.c
    42      struct radeon_ring *ring = &rdev->ring[fence->ring];  in uvd_v2_2_fence_emit() local
    43      uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;  in uvd_v2_2_fence_emit()
    45      radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));  in uvd_v2_2_fence_emit()
    46      radeon_ring_write(ring, fence->seq);  in uvd_v2_2_fence_emit()
    47      radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));  in uvd_v2_2_fence_emit()
    48      radeon_ring_write(ring, lower_32_bits(addr));  in uvd_v2_2_fence_emit()
    49      radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));  in uvd_v2_2_fence_emit()
    50      radeon_ring_write(ring, upper_32_bits(addr) & 0xff);  in uvd_v2_2_fence_emit()
    51      radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));  in uvd_v2_2_fence_emit()
    52      radeon_ring_write(ring, 0);  in uvd_v2_2_fence_emit()
    [all …]
/drivers/net/wireless/b43/

dma.c
    85  struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,  in op32_idx2desc() argument
    91      *meta = &(ring->meta[slot]);  in op32_idx2desc()
    92      desc = ring->descbase;  in op32_idx2desc()
    98  static void op32_fill_descriptor(struct b43_dmaring *ring,  in op32_fill_descriptor() argument
    103     struct b43_dmadesc32 *descbase = ring->descbase;  in op32_fill_descriptor()
    110     B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));  in op32_fill_descriptor()
    112     addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);  in op32_fill_descriptor()
    113     addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);  in op32_fill_descriptor()
    116     if (slot == ring->nr_slots - 1)  in op32_fill_descriptor()
    131 static void op32_poke_tx(struct b43_dmaring *ring, int slot)  in op32_poke_tx() argument
    [all …]
/drivers/net/ethernet/mellanox/mlx4/

en_tx.c
    53      struct mlx4_en_tx_ring *ring;  in mlx4_en_create_tx_ring() local
    57      ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);  in mlx4_en_create_tx_ring()
    58      if (!ring) {  in mlx4_en_create_tx_ring()
    59          ring = kzalloc(sizeof(*ring), GFP_KERNEL);  in mlx4_en_create_tx_ring()
    60          if (!ring) {  in mlx4_en_create_tx_ring()
    66      ring->size = size;  in mlx4_en_create_tx_ring()
    67      ring->size_mask = size - 1;  in mlx4_en_create_tx_ring()
    68      ring->stride = stride;  in mlx4_en_create_tx_ring()
    69      ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;  in mlx4_en_create_tx_ring()
    72      ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);  in mlx4_en_create_tx_ring()
    [all …]
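Two patterns are visible in mlx4_en_create_tx_ring(): try a node-local allocation (kzalloc_node) and fall back to a plain one, and store `size_mask = size - 1` so producer/consumer indices can be reduced with a mask instead of a modulo, which presumes a power-of-two ring size. A user-space sketch of both; calloc stands in for kzalloc since libc has no NUMA-aware equivalent, and all names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tx_ring { uint32_t size, size_mask, stride; };

    static struct tx_ring *create_tx_ring(uint32_t size, uint32_t stride)
    {
        struct tx_ring *ring;

        if (size == 0 || (size & (size - 1)))
            return NULL;          /* size_mask math needs a power of two */

        ring = calloc(1, sizeof(*ring));     /* stands in for kzalloc_node */
        if (!ring)
            ring = calloc(1, sizeof(*ring)); /* fallback path, as in the driver
                                              * (pointless in user space, kept
                                              * only to show the shape) */
        if (!ring)
            return NULL;

        ring->size      = size;
        ring->size_mask = size - 1;  /* index & size_mask == index % size */
        ring->stride    = stride;
        return ring;
    }

    int main(void)
    {
        struct tx_ring *r = create_tx_ring(1024, 64);
        if (r)
            printf("slot for producer index 1030: %u\n",
                   (unsigned)(1030u & r->size_mask));   /* 6 */
        free(r);
        return 0;
    }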
en_rx.c
    151     struct mlx4_en_rx_ring *ring)  in mlx4_en_init_allocator() argument
    159     if (mlx4_alloc_pages(priv, &ring->page_alloc[i],  in mlx4_en_init_allocator()
    169     page_alloc = &ring->page_alloc[i];  in mlx4_en_init_allocator()
    181     struct mlx4_en_rx_ring *ring)  in mlx4_en_destroy_allocator() argument
    189     page_alloc = &ring->page_alloc[i];  in mlx4_en_destroy_allocator()
    205     struct mlx4_en_rx_ring *ring, int index)  in mlx4_en_init_rx_desc() argument
    207     struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;  in mlx4_en_init_rx_desc()
    221     possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;  in mlx4_en_init_rx_desc()
    230     struct mlx4_en_rx_ring *ring, int index,  in mlx4_en_prepare_rx_desc() argument
    233     struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);  in mlx4_en_prepare_rx_desc()
    [all …]
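Note that the RX descriptor is located as `ring->buf + index * ring->stride` rather than by C array indexing: the stride is configured at ring-creation time and can exceed sizeof(struct mlx4_en_rx_desc), leaving room for extra scatter entries after each descriptor. A minimal sketch of that addressing (struct layout invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rx_desc { uint64_t addr; uint32_t byte_count; uint32_t lkey; };
    struct rx_ring { void *buf; size_t stride; };

    /* Descriptor N lives at byte offset N * stride, not N * sizeof(desc). */
    static struct rx_desc *rx_desc_at(struct rx_ring *ring, int index)
    {
        return (struct rx_desc *)((char *)ring->buf + index * ring->stride);
    }

    int main(void)
    {
        char buf[8 * 32];                    /* 8 slots, 32-byte stride */
        struct rx_ring ring = { buf, 32 };

        printf("desc 5 offset = %td\n",
               (char *)rx_desc_at(&ring, 5) - buf);   /* 160 */
        return 0;
    }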
/drivers/crypto/qat/qat_common/

adf_transport.c
    80  static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_reserve_ring() argument
    83      if (bank->ring_mask & (1 << ring)) {  in adf_reserve_ring()
    87      bank->ring_mask |= (1 << ring);  in adf_reserve_ring()
    92  static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_unreserve_ring() argument
    95      bank->ring_mask &= ~(1 << ring);  in adf_unreserve_ring()
    99  static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_enable_ring_irq() argument
    102     bank->irq_mask |= (1 << ring);  in adf_enable_ring_irq()
    109 static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)  in adf_disable_ring_irq() argument
    112     bank->irq_mask &= ~(1 << ring);  in adf_disable_ring_irq()
    117 int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)  in adf_send_message() argument
    [all …]
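adf_reserve_ring()/adf_unreserve_ring() track which rings in a bank are in use with a single bitmask mutated under a lock: reservation fails if the bit is already set, otherwise the bit is claimed. A runnable sketch of that shape using a pthread mutex (the kernel code uses its own lock, and returns a negative errno on the busy path):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bank {
        pthread_mutex_t lock;
        uint32_t ring_mask;      /* bit N set => ring N is reserved */
    };

    static int reserve_ring(struct bank *bank, uint32_t ring)
    {
        int ret = 0;

        pthread_mutex_lock(&bank->lock);
        if (bank->ring_mask & (1u << ring))
            ret = -1;                        /* already taken */
        else
            bank->ring_mask |= (1u << ring); /* claim it */
        pthread_mutex_unlock(&bank->lock);
        return ret;
    }

    static void unreserve_ring(struct bank *bank, uint32_t ring)
    {
        pthread_mutex_lock(&bank->lock);
        bank->ring_mask &= ~(1u << ring);
        pthread_mutex_unlock(&bank->lock);
    }

    int main(void)
    {
        struct bank b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("first reserve:  %d\n", reserve_ring(&b, 3));   /* 0 */
        printf("second reserve: %d\n", reserve_ring(&b, 3));   /* -1 */
        unreserve_ring(&b, 3);
        return 0;
    }

The same test-then-set-under-lock idiom appears in the irq_mask helpers just below it; only the mask being edited differs.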
adf_transport_debug.c
    59      struct adf_etr_ring_data *ring = sfile->private;  in adf_ring_start() local
    65      if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /  in adf_ring_start()
    66          ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))  in adf_ring_start()
    69      return ring->base_addr +  in adf_ring_start()
    70          (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);  in adf_ring_start()
    75      struct adf_etr_ring_data *ring = sfile->private;  in adf_ring_next() local
    77      if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /  in adf_ring_next()
    78          ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))  in adf_ring_next()
    81      return ring->base_addr +  in adf_ring_next()
    82          (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);  in adf_ring_next()
    [all …]
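adf_ring_start() and adf_ring_next() share the same math: the seq_file position indexes whole messages, stops once it passes ring_bytes / msg_bytes, and otherwise yields the message's address while post-incrementing the position. A self-contained sketch of that iterator, with the byte-size macros replaced by plain fields:

    #include <stdio.h>

    struct ring_view {
        char  *base_addr;
        size_t ring_bytes;   /* ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring_size) */
        size_t msg_bytes;    /* ADF_MSG_SIZE_TO_BYTES(msg_size) */
    };

    /* NULL means "past the last message": seq_file stops the walk there. */
    static void *ring_next(struct ring_view *ring, long *pos)
    {
        if ((size_t)*pos >= ring->ring_bytes / ring->msg_bytes)
            return NULL;
        return ring->base_addr + ring->msg_bytes * (*pos)++;
    }

    int main(void)
    {
        char buf[256];
        struct ring_view r = { buf, sizeof(buf), 64 };
        long pos = 0;
        void *msg;

        while ((msg = ring_next(&r, &pos)))       /* offsets 0,64,128,192 */
            printf("msg %ld at offset %td\n", pos - 1, (char *)msg - buf);
        return 0;
    }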
/drivers/gpu/drm/msm/

msm_ringbuffer.c
    23      struct msm_ringbuffer *ring;  in msm_ringbuffer_new() local
    29      ring = kzalloc(sizeof(*ring), GFP_KERNEL);  in msm_ringbuffer_new()
    30      if (!ring) {  in msm_ringbuffer_new()
    35      ring->gpu = gpu;  in msm_ringbuffer_new()
    36      ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);  in msm_ringbuffer_new()
    37      if (IS_ERR(ring->bo)) {  in msm_ringbuffer_new()
    38          ret = PTR_ERR(ring->bo);  in msm_ringbuffer_new()
    39          ring->bo = NULL;  in msm_ringbuffer_new()
    43      ring->start = msm_gem_vaddr_locked(ring->bo);  in msm_ringbuffer_new()
    44      ring->end = ring->start + (size / 4);  in msm_ringbuffer_new()
    [all …]
/drivers/net/ethernet/amd/xgbe/

xgbe-desc.c
    123     struct xgbe_ring *ring)  in xgbe_free_ring() argument
    128     if (!ring)  in xgbe_free_ring()
    131     if (ring->rdata) {  in xgbe_free_ring()
    132         for (i = 0; i < ring->rdesc_count; i++) {  in xgbe_free_ring()
    133             rdata = XGBE_GET_DESC_DATA(ring, i);  in xgbe_free_ring()
    137         kfree(ring->rdata);  in xgbe_free_ring()
    138         ring->rdata = NULL;  in xgbe_free_ring()
    141     if (ring->rdesc) {  in xgbe_free_ring()
    144         ring->rdesc_count),  in xgbe_free_ring()
    145         ring->rdesc, ring->rdesc_dma);  in xgbe_free_ring()
    [all …]
/drivers/net/ethernet/broadcom/

bgmac.c
    48  static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)  in bgmac_dma_tx_reset() argument
    53      if (!ring->mmio_base)  in bgmac_dma_tx_reset()
    60      bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,  in bgmac_dma_tx_reset()
    63      val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);  in bgmac_dma_tx_reset()
    75          ring->mmio_base, val);  in bgmac_dma_tx_reset()
    78      bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);  in bgmac_dma_tx_reset()
    80          ring->mmio_base + BGMAC_DMA_TX_STATUS,  in bgmac_dma_tx_reset()
    84          ring->mmio_base);  in bgmac_dma_tx_reset()
    86      val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);  in bgmac_dma_tx_reset()
    89          ring->mmio_base);  in bgmac_dma_tx_reset()
    [all …]
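bgmac_dma_tx_reset() follows the standard quiesce sequence: write the control register to request a stop, then poll the status register until the engine reports disabled, giving up after a bounded number of tries instead of spinning forever. A generic user-space sketch of poll-until-state-or-timeout; the register is simulated here, where the driver would use its bgmac_read()/bgmac_write() MMIO accessors and delay between polls:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simulated TX status register: 1 = active, 0 = disabled. */
    static uint32_t tx_status = 1;

    static uint32_t read_status(void)
    {
        static int polls;
        if (++polls == 3)        /* pretend the engine quiesces eventually */
            tx_status = 0;
        return tx_status;
    }

    /* Same shape as the bgmac reset path: bounded polling, with an error
     * report rather than a hang if the hardware never reaches the state. */
    static bool wait_for_disabled(int max_polls)
    {
        for (int i = 0; i < max_polls; i++) {
            if (read_status() == 0)
                return true;
            /* the driver delays here (e.g. udelay) between polls */
        }
        return false;
    }

    int main(void)
    {
        puts(wait_for_disabled(10) ? "TX engine disabled"
                                   : "timeout: reset failed");
        return 0;
    }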