Lines Matching refs:gpd
34 #define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO) argument
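The HWO ("hardware own") bit in dw0_info is the ownership handshake: software may only touch a GPD whose HWO bit is clear, and hands the descriptor to the controller by setting it. A minimal, self-contained sketch of the descriptor layout and the check; the field names come from the listing, but the HWO bit position and the little-endian helper are illustrative stand-ins, not the driver's definitions:

#include <stdint.h>

/* simplified stand-in for the real struct qmu_gpd (four 32-bit words) */
struct qmu_gpd {
	uint32_t dw0_info;	/* flags and extension bits, little-endian in memory */
	uint32_t next_gpd;	/* low 32 bits of the next GPD's DMA address */
	uint32_t buffer;	/* low 32 bits of the data buffer's DMA address */
	uint32_t dw3_info;	/* transfer length and extension bits */
};

#define GPD_FLAGS_HWO	(1u << 0)	/* assumed bit position: "hardware owns this GPD" */

/* stand-in for the kernel's le32_to_cpu(); an identity on little-endian hosts */
static inline uint32_t sketch_le32_to_cpu(uint32_t v) { return v; }

#define GET_GPD_HWO(gpd) \
	(sketch_le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)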
133 struct qmu_gpd *gpd) in gpd_virt_to_dma() argument
139 offset = gpd - gpd_head; in gpd_virt_to_dma()
143 return dma_base + (offset * sizeof(*gpd)); in gpd_virt_to_dma()
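gpd_virt_to_dma() (lines 133-143 above) translates a GPD's CPU pointer into the DMA address the hardware expects: all descriptors of one endpoint sit in a single coherent block, so the bus address is the ring's DMA base plus the descriptor's index times its size. A sketch reusing struct qmu_gpd from the previous sketch; the ring fields are inferred from gpd_ring_init() below and are assumptions here:

#include <stddef.h>
#include <stdint.h>

/* simplified ring bookkeeping; field set inferred from the listing */
struct mtu3_gpd_ring {
	uint64_t dma;			/* DMA (bus) address of ring->start */
	struct qmu_gpd *start;
	struct qmu_gpd *end;
	struct qmu_gpd *enqueue;	/* next slot software will fill */
	struct qmu_gpd *dequeue;	/* oldest slot still out with the hardware */
};

/* DMA address of @gpd = ring base + descriptor index * descriptor size */
static uint64_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ptrdiff_t offset = gpd - ring->start;		/* index within the ring */

	return ring->dma + (uint64_t)offset * sizeof(*gpd);
}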
146 static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) in gpd_ring_init() argument
148 ring->start = gpd; in gpd_ring_init()
149 ring->enqueue = gpd; in gpd_ring_init()
150 ring->dequeue = gpd; in gpd_ring_init()
151 ring->end = gpd + MAX_GPD_NUM - 1; in gpd_ring_init()
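gpd_ring_init() (lines 146-151) points all three cursors at the first descriptor and records the last valid slot; the ring depth MAX_GPD_NUM is not part of the listing, so the value below is an assumption. A sketch using the simplified types above:

#define MAX_GPD_NUM	64	/* assumed ring depth */

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;			/* next free slot to fill */
	ring->dequeue = gpd;			/* next slot to reap on completion */
	ring->end = gpd + MAX_GPD_NUM - 1;	/* last descriptor in the block */
}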
157 struct qmu_gpd *gpd = ring->start; in reset_gpd_list() local
159 if (gpd) { in reset_gpd_list()
160 gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO); in reset_gpd_list()
161 gpd_ring_init(ring, gpd); in reset_gpd_list()
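reset_gpd_list() (lines 157-161) returns the first descriptor to software ownership and rewinds the cursors. The real function presumably reaches the ring through the endpoint; this sketch takes it directly, and cpu_to_le32() is again replaced by an identity helper:

/* stand-in for the kernel's cpu_to_le32(); an identity on little-endian hosts */
static inline uint32_t sketch_cpu_to_le32(uint32_t v) { return v; }

static void reset_gpd_list(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		/* clear HWO: software owns the first descriptor again */
		gpd->dw0_info &= sketch_cpu_to_le32(~GPD_FLAGS_HWO);
		/* rewind enqueue/dequeue back to the start of the ring */
		gpd_ring_init(ring, gpd);
	}
}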
167 struct qmu_gpd *gpd; in mtu3_gpd_ring_alloc() local
171 gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); in mtu3_gpd_ring_alloc()
172 if (gpd == NULL) in mtu3_gpd_ring_alloc()
175 gpd_ring_init(ring, gpd); in mtu3_gpd_ring_alloc()
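mtu3_gpd_ring_alloc() (lines 167-175) gets the whole ring from the controller's DMA pool in one zeroed, coherent allocation; dma_pool_zalloc() also returns the bus address, which becomes ring->dma and feeds gpd_virt_to_dma() above. A kernel-context sketch (it only makes sense inside the driver, where mtu3.h supplies struct mtu3_ep and qmu_gpd_pool); the dma_pool_zalloc() call and GFP_ATOMIC come from the listing, while the -ENOMEM return and the &mep->gpd_ring derivation are assumptions:

static int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd;

	/* zeroed, DMA-coherent block; ring->dma receives its bus address */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (!gpd)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);
	return 0;
}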
247 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_tx_gpd() local
253 gpd->dw0_info = 0; /* SW own it */ in mtu3_prepare_tx_gpd()
254 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); in mtu3_prepare_tx_gpd()
256 gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length)); in mtu3_prepare_tx_gpd()
262 mep->epnum, gpd, enq, &enq_dma); in mtu3_prepare_tx_gpd()
265 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); in mtu3_prepare_tx_gpd()
267 gpd->dw0_info = cpu_to_le32(ext_addr); in mtu3_prepare_tx_gpd()
271 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP); in mtu3_prepare_tx_gpd()
273 gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP); in mtu3_prepare_tx_gpd()
278 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); in mtu3_prepare_tx_gpd()
280 mreq->gpd = gpd; in mtu3_prepare_tx_gpd()
281 trace_mtu3_prepare_gpd(mep, gpd); in mtu3_prepare_tx_gpd()
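The TX prepare path (lines 247-281) builds a descriptor in a fixed order: clear dw0_info so software owns the slot, point buffer at the request's DMA address, write the length into dw3_info, link next_gpd to the following slot, optionally flag a trailing zero-length packet, and only as the last step set IOC | HWO so the controller never sees a half-built GPD. The listing also shows ext_addr packing (upper DMA address bits) and a gen2cp-dependent choice between GPD_FLAGS_ZLP in dw0_info and GPD_EXT_FLAG_ZLP in dw3_info; both are only noted as comments in this condensed sketch, which reuses the types and helpers above. Bit positions and helper bodies are assumptions:

#define GPD_FLAGS_IOC	(1u << 7)	/* assumed bit: interrupt on completion */
#define GPD_FLAGS_ZLP	(1u << 6)	/* assumed bit: append a zero-length packet */

/* wrap the enqueue cursor around the ring (body assumed, not in the listing) */
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;
	return ring->enqueue;
}

/* build one TX GPD for a buffer at @dma of @len bytes */
static void prepare_tx_gpd_sketch(struct mtu3_gpd_ring *ring,
				  uint64_t dma, uint32_t len, int zlp)
{
	struct qmu_gpd *gpd = ring->enqueue;
	struct qmu_gpd *enq;

	gpd->dw0_info = 0;					/* software owns it while filling */
	gpd->buffer = sketch_cpu_to_le32((uint32_t)dma);	/* low 32 bits of the buffer */
	gpd->dw3_info = sketch_cpu_to_le32(len);		/* GPD_DATA_LEN() packing elided */

	/* link this descriptor to the next slot's DMA address */
	enq = advance_enq_gpd(ring);
	gpd->next_gpd = sketch_cpu_to_le32((uint32_t)gpd_virt_to_dma(ring, enq));
	/* the real code also writes ext_addr (upper DMA bits) into dw0_info here */

	if (zlp)
		gpd->dw0_info |= sketch_cpu_to_le32(GPD_FLAGS_ZLP);

	/* hand ownership to the hardware only after everything else is in place */
	gpd->dw0_info |= sketch_cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
}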
290 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_rx_gpd() local
296 gpd->dw0_info = 0; /* SW own it */ in mtu3_prepare_rx_gpd()
297 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); in mtu3_prepare_rx_gpd()
299 gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length)); in mtu3_prepare_rx_gpd()
305 mep->epnum, gpd, enq, &enq_dma); in mtu3_prepare_rx_gpd()
308 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); in mtu3_prepare_rx_gpd()
310 gpd->dw3_info = cpu_to_le32(ext_addr); in mtu3_prepare_rx_gpd()
313 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); in mtu3_prepare_rx_gpd()
315 mreq->gpd = gpd; in mtu3_prepare_rx_gpd()
316 trace_mtu3_prepare_gpd(mep, gpd); in mtu3_prepare_rx_gpd()
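The RX prepare path (lines 290-316) mirrors TX with one visible swap: the size of the buffer the hardware may fill goes into dw0_info via GPD_RX_BUF_LEN(), and the extension-address bits land in dw3_info instead; the received byte count is later read back from dw3_info in qmu_done_rx(). A short sketch of just that difference, reusing the helpers above:

/* build one RX GPD: @buf_len is how much the hardware may write to @dma */
static void prepare_rx_gpd_sketch(struct mtu3_gpd_ring *ring,
				  uint64_t dma, uint32_t buf_len)
{
	struct qmu_gpd *gpd = ring->enqueue;
	struct qmu_gpd *enq;

	gpd->dw0_info = sketch_cpu_to_le32(buf_len);	/* GPD_RX_BUF_LEN() packing elided */
	gpd->buffer = sketch_cpu_to_le32((uint32_t)dma);

	enq = advance_enq_gpd(ring);
	gpd->next_gpd = sketch_cpu_to_le32((uint32_t)gpd_virt_to_dma(ring, enq));
	/* on the RX side the ext_addr bits go into dw3_info (noted only, elided here) */

	gpd->dw0_info |= sketch_cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
}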
474 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_tx() local
485 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_tx()
487 while (gpd != gpd_current && !GET_GPD_HWO(gpd)) { in qmu_done_tx()
491 if (mreq == NULL || mreq->gpd != gpd) { in qmu_done_tx()
497 request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info)); in qmu_done_tx()
498 trace_mtu3_complete_gpd(mep, gpd); in qmu_done_tx()
501 gpd = advance_deq_gpd(ring); in qmu_done_tx()
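qmu_done_tx() (lines 474-501) reaps finished descriptors: starting at ring->dequeue, it walks forward until it either reaches the GPD the hardware is currently processing (gpd_current, read from a QMU register outside the listed lines) or hits a descriptor whose HWO bit is still set, completing the matching request with the byte count from dw3_info at each step. The same walk serves qmu_done_rx() below. A sketch of the loop, with the driver's request bookkeeping (next_request(), the mreq->gpd sanity check, the gadget completion call) reduced to a hypothetical callback:

/* advance the dequeue cursor with wrap-around (body assumed) */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;
	return ring->dequeue;
}

/*
 * @gpd_current: descriptor the hardware is working on right now
 * @complete:    hypothetical stand-in for completing the matching request
 *               with the number of bytes actually transferred
 */
static void qmu_done_sketch(struct mtu3_gpd_ring *ring,
			    struct qmu_gpd *gpd_current,
			    void (*complete)(uint32_t actual_len))
{
	struct qmu_gpd *gpd = ring->dequeue;

	/* stop at the hardware's current GPD, or at one it still owns */
	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		complete(sketch_le32_to_cpu(gpd->dw3_info));	/* GPD_DATA_LEN() unpacking elided */
		gpd = advance_deq_gpd(ring);
	}
}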
514 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_rx() local
524 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_rx()
526 while (gpd != gpd_current && !GET_GPD_HWO(gpd)) { in qmu_done_rx()
530 if (mreq == NULL || mreq->gpd != gpd) { in qmu_done_rx()
536 req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info)); in qmu_done_rx()
537 trace_mtu3_complete_gpd(mep, gpd); in qmu_done_rx()
540 gpd = advance_deq_gpd(ring); in qmu_done_rx()
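qmu_done_rx() (lines 514-540) follows the same walk as the sketch above, differing only in which request queue it drains; the received length again comes out of dw3_info. The ordering across the two halves implements the ownership handshake visible in the listing: prepare sets HWO as its final store, and done refuses to touch any descriptor whose HWO bit is still set or that the controller reports as current.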