
Lines Matching +full:rx +full:- +full:tx

2  * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
55 * This structure is divided into two cache-aligned parts: the first is only
56 * written through the tx.channel pointer, while the second is only written
57 * through the rx.channel pointer. This delineates ownership of the cache
58 * lines, which is critical to performance and necessary in non-cache coherent
70 } tx; member
76 } rx; member
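The matcher shows only the closing braces of the two sub-structures. A sketch of the full header layout they close, reconstructed from the ownership comment above and the accessors used throughout the rest of the file (treat the padding scheme as an assumption, not the verbatim definition):

/*
 * Sketch of the shared channel header. Each end writes only its own
 * part: the transmitter owns tx.count/tx.state, the receiver owns
 * rx.count. Padding each part to TEGRA_IVC_ALIGN keeps them in
 * separate cache lines on non-coherent systems.
 */
struct tegra_ivc_header {
	union {
		struct {
			u32 count;	/* frames produced so far */
			u32 state;	/* reset-protocol state */
		};
		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		u32 count;		/* frames consumed so far */
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};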
81 if (!ivc->peer) in tegra_ivc_invalidate()
84 dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN, in tegra_ivc_invalidate()
90 if (!ivc->peer) in tegra_ivc_flush()
93 dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN, in tegra_ivc_flush()
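Only the dma_sync_single_*() calls are matched; the surrounding helpers are short wrappers. A plausible reconstruction, assuming the DMA directions implied by the calls shown:

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;	/* cache-coherent setup: nothing to do */

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}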
105 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_empty() local
106 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_empty() local
109 * Perform an over-full check to prevent denial of service attacks in tegra_ivc_empty()
112 * expected to check for full or over-full conditions. in tegra_ivc_empty()
118 if (tx - rx > ivc->num_frames) in tegra_ivc_empty()
121 return tx == rx; in tegra_ivc_empty()
127 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_full() local
128 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_full() local
134 return tx - rx >= ivc->num_frames; in tegra_ivc_full()
140 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_available() local
141 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_available() local
145 * over-full situation can lead to denial of service attacks. See the in tegra_ivc_available()
147 * over-full considerations. in tegra_ivc_available()
149 return tx - rx; in tegra_ivc_available()
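All three predicates above (empty, full, available) rely on the same unsigned arithmetic: tx.count and rx.count are free-running u32 counters, and their difference modulo 2^32 is the number of queued frames. A worked example of why the over-full check matters (illustrative values, not from the source):

/*
 * With num_frames = 16:
 *
 *   tx = 5,          rx = 5           ->  tx - rx = 0   (empty)
 *   tx = 21,         rx = 5           ->  tx - rx = 16  (full)
 *   tx = 0x00000002, rx = 0xfffffffe  ->  tx - rx = 4   (wraparound is safe)
 *   tx = 100,        rx = 5           ->  tx - rx = 95  (over-full: the peer
 *        corrupted its counter; report the queue as both empty and full so
 *        that neither reads nor writes proceed, defusing the DoS)
 */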
154 WRITE_ONCE(ivc->tx.channel->tx.count, in tegra_ivc_advance_tx()
155 READ_ONCE(ivc->tx.channel->tx.count) + 1); in tegra_ivc_advance_tx()
157 if (ivc->tx.position == ivc->num_frames - 1) in tegra_ivc_advance_tx()
158 ivc->tx.position = 0; in tegra_ivc_advance_tx()
160 ivc->tx.position++; in tegra_ivc_advance_tx()
165 WRITE_ONCE(ivc->rx.channel->rx.count, in tegra_ivc_advance_rx()
166 READ_ONCE(ivc->rx.channel->rx.count) + 1); in tegra_ivc_advance_rx()
168 if (ivc->rx.position == ivc->num_frames - 1) in tegra_ivc_advance_rx()
169 ivc->rx.position = 0; in tegra_ivc_advance_rx()
171 ivc->rx.position++; in tegra_ivc_advance_rx()
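Advancing an endpoint touches two things: the shared free-running count, accessed with READ_ONCE/WRITE_ONCE because the peer polls it, and the private position, which wraps at num_frames. Only the else branch of the wrap test is elided by the matcher; a sketch of the complete tx helper (the rx helper is identical on the other channel):

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	/* publish one more produced frame to the peer */
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	/* position tracks count modulo num_frames without a division */
	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}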
176 unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_check_read()
179 * tx.channel->state is set locally, so it is not synchronized with in tegra_ivc_check_read()
183 * asynchronous transition of rx.channel->state to in tegra_ivc_check_read()
186 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_check_read()
187 return -ECONNRESET; in tegra_ivc_check_read()
196 if (!tegra_ivc_empty(ivc, ivc->rx.channel)) in tegra_ivc_check_read()
199 tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); in tegra_ivc_check_read()
201 if (tegra_ivc_empty(ivc, ivc->rx.channel)) in tegra_ivc_check_read()
202 return -ENOSPC; in tegra_ivc_check_read()
209 unsigned int offset = offsetof(struct tegra_ivc_header, rx.count); in tegra_ivc_check_write()
211 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_check_write()
212 return -ECONNRESET; in tegra_ivc_check_write()
214 if (!tegra_ivc_full(ivc, ivc->tx.channel)) in tegra_ivc_check_write()
217 tegra_ivc_invalidate(ivc, ivc->tx.phys + offset); in tegra_ivc_check_write()
219 if (tegra_ivc_full(ivc, ivc->tx.channel)) in tegra_ivc_check_write()
220 return -ENOSPC; in tegra_ivc_check_write()
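Both checks use the same double-test pattern to keep cache maintenance off the fast path: first test the stale, locally cached copy of the peer's counter; only if that copy says the queue is empty (or full) pay for an invalidate and test again. A sketch of the read side, where the counter refreshed on the rx channel is the peer's tx.count; the return 0 fast paths are assumptions, since the matcher drops them:

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/* fast path: the cached copy already shows data pending */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	/* slow path: refresh the peer's tx.count, then re-test */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

tegra_ivc_check_write() is the mirror image: it refreshes the peer's rx.count on the tx channel and re-tests tegra_ivc_full().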
229 if (WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_frame_virt()
230 return ERR_PTR(-EINVAL); in tegra_ivc_frame_virt()
232 return (void *)(header + 1) + ivc->frame_size * frame; in tegra_ivc_frame_virt()
241 offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame; in tegra_ivc_frame_phys()
252 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_invalidate_frame()
257 dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE); in tegra_ivc_invalidate_frame()
266 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_flush_frame()
271 dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE); in tegra_ivc_flush_frame()
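Frames are laid out contiguously immediately after the header, so both the virtual and DMA addresses of frame n are simple offsets from the channel base. A sketch of the physical-address helper built around the offset computation shown above:

/*
 * Channel memory layout:
 *
 *   | tegra_ivc_header | frame 0 | frame 1 | ... | frame num_frames-1 |
 */
static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}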
274 /* directly peek at the next frame rx'ed */
280 return ERR_PTR(-EINVAL); in tegra_ivc_read_get_next_frame()
287 * Order observation of ivc->rx.position potentially indicating new in tegra_ivc_read_get_next_frame()
292 tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0, in tegra_ivc_read_get_next_frame()
293 ivc->frame_size); in tegra_ivc_read_get_next_frame()
295 return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position); in tegra_ivc_read_get_next_frame()
301 unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); in tegra_ivc_read_advance() local
302 unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_read_advance() local
307 * have already observed the channel non-empty. This check is just to in tegra_ivc_read_advance()
316 tegra_ivc_flush(ivc, ivc->rx.phys + rx); in tegra_ivc_read_advance()
319 * Ensure our write to ivc->rx.position occurs before our read from in tegra_ivc_read_advance()
320 * ivc->tx.position. in tegra_ivc_read_advance()
325 * Notify only upon transition from full to non-full. The available in tegra_ivc_read_advance()
327 * side-effect will be a spurious notification. in tegra_ivc_read_advance()
329 tegra_ivc_invalidate(ivc, ivc->rx.phys + tx); in tegra_ivc_read_advance()
331 if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1) in tegra_ivc_read_advance()
332 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_read_advance()
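Put together, a consumer peeks at the next frame, processes it in place, then advances; tegra_ivc_read_advance() notifies the producer only on the full to non-full transition, i.e. when available == num_frames - 1 just after consuming, so a steadily drained queue generates no doorbell traffic. A hedged usage sketch; process_frame() is a hypothetical callback, not part of the API:

static void example_drain(struct tegra_ivc *ivc)
{
	void *frame;

	for (;;) {
		/* returns ERR_PTR(-ENOSPC) once the queue is empty */
		frame = tegra_ivc_read_get_next_frame(ivc);
		if (IS_ERR(frame))
			break;

		process_frame(frame, ivc->frame_size);	/* hypothetical */
		tegra_ivc_read_advance(ivc);
	}
}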
338 /* directly poke at the next frame to be tx'ed */
347 return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position); in tegra_ivc_write_get_next_frame()
351 /* advance the tx buffer */
354 unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_write_advance() local
355 unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); in tegra_ivc_write_advance() local
362 tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0, in tegra_ivc_write_advance()
363 ivc->frame_size); in tegra_ivc_write_advance()
367 * ivc->tx.position. in tegra_ivc_write_advance()
372 tegra_ivc_flush(ivc, ivc->tx.phys + tx); in tegra_ivc_write_advance()
375 * Ensure our write to ivc->tx.position occurs before our read from in tegra_ivc_write_advance()
376 * ivc->rx.position. in tegra_ivc_write_advance()
381 * Notify only upon transition from empty to non-empty. The available in tegra_ivc_write_advance()
383 * side-effect will be a spurious notification. in tegra_ivc_write_advance()
385 tegra_ivc_invalidate(ivc, ivc->tx.phys + rx); in tegra_ivc_write_advance()
387 if (tegra_ivc_available(ivc, ivc->tx.channel) == 1) in tegra_ivc_write_advance()
388 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_write_advance()
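The producer path is symmetric: reserve the next tx frame, fill it, then advance, which flushes the frame and the counter before notifying; the notification fires only on the empty to non-empty transition (available == 1 just after producing). A hedged producer sketch:

static int example_send(struct tegra_ivc *ivc, const void *data, size_t len)
{
	void *frame = tegra_ivc_write_get_next_frame(ivc);

	if (IS_ERR(frame))
		return PTR_ERR(frame);	/* -ENOSPC: queue currently full */

	memcpy(frame, data, min_t(size_t, len, ivc->frame_size));

	return tegra_ivc_write_advance(ivc);
}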
396 unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_reset()
398 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC; in tegra_ivc_reset()
399 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_reset()
400 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_reset()
406 * IVC State Transition Table - see tegra_ivc_notified()
426 unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_notified()
430 tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); in tegra_ivc_notified()
431 state = READ_ONCE(ivc->rx.channel->tx.state); in tegra_ivc_notified()
434 offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_notified()
438 * clearing tx.channel. in tegra_ivc_notified()
443 * Reset tx.channel counters. The remote end is in the SYNC in tegra_ivc_notified()
447 ivc->tx.channel->tx.count = 0; in tegra_ivc_notified()
448 ivc->rx.channel->rx.count = 0; in tegra_ivc_notified()
450 ivc->tx.position = 0; in tegra_ivc_notified()
451 ivc->rx.position = 0; in tegra_ivc_notified()
463 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK; in tegra_ivc_notified()
464 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
469 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
471 } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC && in tegra_ivc_notified()
473 offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_notified()
482 * Reset tx.channel counters. The remote end is in the ACK in tegra_ivc_notified()
486 ivc->tx.channel->tx.count = 0; in tegra_ivc_notified()
487 ivc->rx.channel->rx.count = 0; in tegra_ivc_notified()
489 ivc->tx.position = 0; in tegra_ivc_notified()
490 ivc->rx.position = 0; in tegra_ivc_notified()
503 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; in tegra_ivc_notified()
504 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
509 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
511 } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) { in tegra_ivc_notified()
512 offset = offsetof(struct tegra_ivc_header, tx.count); in tegra_ivc_notified()
517 * peer state before storing to tx.channel. in tegra_ivc_notified()
527 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; in tegra_ivc_notified()
528 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
533 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
544 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_notified()
545 return -EAGAIN; in tegra_ivc_notified()
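The three branches above implement a three-way reset handshake driven by each side's tx.state; tegra_ivc_reset() kicks it off by setting the local state to TEGRA_IVC_STATE_SYNC and notifying the peer. A reconstruction of the transitions visible in the matched lines (the full table in the source comment also lists the no-op combinations):

/*
 * remote tx.state   local tx.state   action in tegra_ivc_notified()
 * ---------------   --------------   ------------------------------------
 * SYNC              any              reset counters, local -> ACK, notify
 * ACK               SYNC             reset counters, local -> ESTABLISHED,
 *                                    notify
 * (past ACK)        ACK              local -> ESTABLISHED, notify
 *
 * Until the local end reaches TEGRA_IVC_STATE_ESTABLISHED, the function
 * returns -EAGAIN and callers must not touch the data queues.
 */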
560 pr_err("%s: queue_size (%u) must be %u-byte aligned\n", in tegra_ivc_total_queue_size()
569 static int tegra_ivc_check_params(unsigned long rx, unsigned long tx, in tegra_ivc_check_params() argument
572 BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count), in tegra_ivc_check_params()
574 BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count), in tegra_ivc_check_params()
581 return -EINVAL; in tegra_ivc_check_params()
586 return -EINVAL; in tegra_ivc_check_params()
593 if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) { in tegra_ivc_check_params()
594 pr_err("IVC channel start not aligned: %#lx\n", rx); in tegra_ivc_check_params()
595 return -EINVAL; in tegra_ivc_check_params()
598 if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) { in tegra_ivc_check_params()
599 pr_err("IVC channel start not aligned: %#lx\n", tx); in tegra_ivc_check_params()
600 return -EINVAL; in tegra_ivc_check_params()
603 if (rx < tx) { in tegra_ivc_check_params()
604 if (rx + frame_size * num_frames > tx) { in tegra_ivc_check_params()
606 rx, frame_size * num_frames, tx); in tegra_ivc_check_params()
607 return -EINVAL; in tegra_ivc_check_params()
610 if (tx + frame_size * num_frames > rx) { in tegra_ivc_check_params()
612 tx, frame_size * num_frames, rx); in tegra_ivc_check_params()
613 return -EINVAL; in tegra_ivc_check_params()
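The overlap check treats whichever region starts lower as needing to end before the other begins. A worked example with illustrative numbers, not taken from the source:

/*
 * frame_size = 128, num_frames = 16, so each queue's frame data spans
 * 128 * 16 = 2048 (0x800) bytes. With rx = 0x1000 and tx = 0x1600:
 *
 *   rx < tx and rx + 0x800 = 0x1800 > 0x1600  ->  overlap, -EINVAL
 *
 * Moving tx up to 0x1800 or beyond satisfies the check.
 */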
620 int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx, in tegra_ivc_init() argument
621 dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys, in tegra_ivc_init() argument
630 return -EINVAL; in tegra_ivc_init()
637 return -E2BIG; in tegra_ivc_init()
639 err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx, in tegra_ivc_init()
647 ivc->rx.phys = dma_map_single(peer, rx, queue_size, in tegra_ivc_init()
649 if (dma_mapping_error(peer, ivc->rx.phys)) in tegra_ivc_init()
650 return -ENOMEM; in tegra_ivc_init()
652 ivc->tx.phys = dma_map_single(peer, tx, queue_size, in tegra_ivc_init()
654 if (dma_mapping_error(peer, ivc->tx.phys)) { in tegra_ivc_init()
655 dma_unmap_single(peer, ivc->rx.phys, queue_size, in tegra_ivc_init()
657 return -ENOMEM; in tegra_ivc_init()
660 ivc->rx.phys = rx_phys; in tegra_ivc_init()
661 ivc->tx.phys = tx_phys; in tegra_ivc_init()
664 ivc->rx.channel = rx; in tegra_ivc_init()
665 ivc->tx.channel = tx; in tegra_ivc_init()
666 ivc->peer = peer; in tegra_ivc_init()
667 ivc->notify = notify; in tegra_ivc_init()
668 ivc->notify_data = data; in tegra_ivc_init()
669 ivc->frame_size = frame_size; in tegra_ivc_init()
670 ivc->num_frames = num_frames; in tegra_ivc_init()
676 ivc->tx.position = 0; in tegra_ivc_init()
677 ivc->rx.position = 0; in tegra_ivc_init()
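When a peer device is supplied, tegra_ivc_init() maps the two buffers for DMA itself and the rx_phys/tx_phys arguments go unused; with a NULL peer it trusts the caller-provided addresses. A hedged setup sketch; my_notify() and the buffer origins are hypothetical, and the parameter order after tx_phys (num_frames, frame_size, notify, data) is inferred from the assignments in the function body:

static void my_notify(struct tegra_ivc *ivc, void *data)
{
	/* ring a doorbell to the remote processor (hypothetical) */
}

static int my_channel_setup(struct device *peer, void *rx, void *tx,
			    struct tegra_ivc *ivc)
{
	/*
	 * 16 frames of 64 bytes per direction; rx and tx must be
	 * TEGRA_IVC_ALIGN-aligned and non-overlapping. The phys
	 * arguments are 0 because peer != NULL means the buffers
	 * get dma_map_single()'d inside tegra_ivc_init().
	 */
	return tegra_ivc_init(ivc, peer, rx, 0, tx, 0, 16, 64,
			      my_notify, NULL);
}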
685 if (ivc->peer) { in tegra_ivc_cleanup()
686 size_t size = tegra_ivc_total_queue_size(ivc->num_frames * in tegra_ivc_cleanup()
687 ivc->frame_size); in tegra_ivc_cleanup()
689 dma_unmap_single(ivc->peer, ivc->rx.phys, size, in tegra_ivc_cleanup()
691 dma_unmap_single(ivc->peer, ivc->tx.phys, size, in tegra_ivc_cleanup()