Lines matching refs: vioch (SCMI virtio transport, drivers/firmware/arm_scmi/virtio.c)

114 static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,  in scmi_vio_channel_ready()  argument
119 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_ready()
120 cinfo->transport_info = vioch; in scmi_vio_channel_ready()
122 vioch->cinfo = cinfo; in scmi_vio_channel_ready()
123 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_ready()
125 refcount_set(&vioch->users, 1); in scmi_vio_channel_ready()
128 static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) in scmi_vio_channel_acquire() argument
130 return refcount_inc_not_zero(&vioch->users); in scmi_vio_channel_acquire()
133 static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) in scmi_vio_channel_release() argument
135 if (refcount_dec_and_test(&vioch->users)) { in scmi_vio_channel_release()
138 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_release()
139 if (vioch->shutdown_done) { in scmi_vio_channel_release()
140 vioch->cinfo = NULL; in scmi_vio_channel_release()
141 complete(vioch->shutdown_done); in scmi_vio_channel_release()
143 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_release()
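The three helpers above implement a refcount-guarded lifetime for the per-virtqueue channel: scmi_vio_channel_ready() publishes the channel with one initial reference, every user path takes a reference through scmi_vio_channel_acquire() before touching vioch, and scmi_vio_channel_release() drops it, completing the shutdown handshake once the last user is gone. A minimal sketch of that pattern, reconstructed only from the matched lines (surrounding error handling and unrelated setup omitted):

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;	/* back-pointer used by the SCMI core */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	/* Published: the channel now holds its own initial reference. */
	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	/* Fails (returns false) once the channel has started shutting down. */
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			/* Last user gone: unpublish and wake the waiter. */
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}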
147 static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) in scmi_vio_channel_cleanup_sync() argument
156 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
157 if (!vioch->cinfo || vioch->shutdown_done) { in scmi_vio_channel_cleanup_sync()
158 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
162 vioch->shutdown_done = &vioch_shutdown_done; in scmi_vio_channel_cleanup_sync()
163 if (!vioch->is_rx && vioch->deferred_tx_wq) in scmi_vio_channel_cleanup_sync()
165 vioch->deferred_tx_wq = NULL; in scmi_vio_channel_cleanup_sync()
166 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
168 scmi_vio_channel_release(vioch); in scmi_vio_channel_cleanup_sync()
171 wait_for_completion(vioch->shutdown_done); in scmi_vio_channel_cleanup_sync()
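scmi_vio_channel_cleanup_sync() is the other half of the handshake: it drops the initial reference set by scmi_vio_channel_ready() and then blocks until the last concurrent user signals shutdown_done. A sketch assuming the completion is declared on the caller's stack (the matched lines only show the vioch accesses):

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		/* Never readied, or a shutdown is already in progress. */
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* TX only: prevent any further deferred-worker kicks. */
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	/* Drop the initial reference taken in scmi_vio_channel_ready(). */
	scmi_vio_channel_release(vioch);

	/* Wait until the last concurrent user signals shutdown_done. */
	wait_for_completion(vioch->shutdown_done);
}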
176 scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) in scmi_virtio_get_free_msg() argument
181 spin_lock_irqsave(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
182 if (list_empty(&vioch->free_list)) { in scmi_virtio_get_free_msg()
183 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
187 msg = list_first_entry(&vioch->free_list, typeof(*msg), list); in scmi_virtio_get_free_msg()
189 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
204 static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, in scmi_vio_msg_release() argument
213 spin_lock_irqsave(&vioch->free_lock, flags); in scmi_vio_msg_release()
214 list_add_tail(&msg->list, &vioch->free_list); in scmi_vio_msg_release()
215 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_vio_msg_release()
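These two helpers manage a spinlock-protected pool of pre-allocated message descriptors: scmi_virtio_get_free_msg() pops one off free_list, scmi_vio_msg_release() puts it back. The boolean result of the release is what line 626 tests, so the sketch below also models a per-message refcount; the msg->users field name is an assumption, not part of the matched lines:

static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;	/* pool exhausted: caller must back off */
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Hand out with a single reference held by the sender (field assumed). */
	refcount_set(&msg->users, 1);

	return msg;
}

/* Returns true once the last reference to @msg is gone and it is pooled. */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}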
226 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, in scmi_vio_feed_vq_rx() argument
232 struct device *dev = &vioch->vqueue->vdev->dev; in scmi_vio_feed_vq_rx()
236 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
238 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); in scmi_vio_feed_vq_rx()
242 virtqueue_kick(vioch->vqueue); in scmi_vio_feed_vq_rx()
244 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
253 static void scmi_finalize_message(struct scmi_vio_channel *vioch, in scmi_finalize_message() argument
256 if (vioch->is_rx) in scmi_finalize_message()
257 scmi_vio_feed_vq_rx(vioch, msg); in scmi_finalize_message()
259 scmi_vio_msg_release(vioch, msg); in scmi_finalize_message()
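scmi_finalize_message() is the common tail of message processing: on an RX channel the buffer goes straight back to the virtqueue as a fresh inbuf, on a TX channel it returns to the free pool. A sketch built around the matched lines; the msg->input/msg->max_len scatterlist setup and the error-message text are assumptions about the omitted lines:

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	int rc;
	unsigned long flags;
	struct scatterlist sg_in;
	struct device *dev = &vioch->vqueue->vdev->dev;

	/* Assumed fields: the message's receive buffer and its size. */
	sg_init_one(&sg_in, msg->input, msg->max_len);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);	/* recycle as a new inbuf */
	else
		scmi_vio_msg_release(vioch, msg);	/* back to the free pool */
}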
266 struct scmi_vio_channel *vioch; in scmi_vio_complete_cb() local
272 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; in scmi_vio_complete_cb()
275 if (!scmi_vio_channel_acquire(vioch)) in scmi_vio_complete_cb()
278 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_complete_cb()
287 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_complete_cb()
288 scmi_vio_channel_release(vioch); in scmi_vio_complete_cb()
293 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_complete_cb()
297 scmi_rx_callback(vioch->cinfo, in scmi_vio_complete_cb()
300 scmi_finalize_message(vioch, msg); in scmi_vio_complete_cb()
310 scmi_vio_channel_release(vioch); in scmi_vio_complete_cb()
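The completion callback locates its channel from vdev->priv and the ringing queue's index, takes a channel reference, then loops pulling completed buffers and handing each one to the SCMI core before finalizing it. The sketch below is condensed: the callback disable/re-enable dance and the exit conditions of the real loop are simplified, and msg_read_header()/msg->input are assumed helpers for unpacking the message header:

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;

	/* One scmi_vio_channel per virtqueue, indexed off vdev->priv. */
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	if (!scmi_vio_channel_acquire(vioch))
		return;

	for (;;) {
		spin_lock_irqsave(&vioch->lock, flags);
		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			spin_unlock_irqrestore(&vioch->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		/* Hand the payload to the SCMI core, then recycle the buffer. */
		scmi_rx_callback(vioch->cinfo,
				 msg_read_header(msg->input), msg);
		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_release(vioch);
}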
317 struct scmi_vio_channel *vioch; in scmi_vio_deferred_tx_worker() local
320 vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); in scmi_vio_deferred_tx_worker()
322 if (!scmi_vio_channel_acquire(vioch)) in scmi_vio_deferred_tx_worker()
331 spin_lock_irqsave(&vioch->pending_lock, flags); in scmi_vio_deferred_tx_worker()
334 list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { in scmi_vio_deferred_tx_worker()
342 scmi_rx_callback(vioch->cinfo, in scmi_vio_deferred_tx_worker()
346 scmi_vio_msg_release(vioch, msg); in scmi_vio_deferred_tx_worker()
349 spin_unlock_irqrestore(&vioch->pending_lock, flags); in scmi_vio_deferred_tx_worker()
352 scmi_vio_complete_cb(vioch->vqueue); in scmi_vio_deferred_tx_worker()
354 scmi_vio_channel_release(vioch); in scmi_vio_deferred_tx_worker()
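The deferred TX worker handles replies that the polling path could not consume itself: it drains pending_cmds_list under pending_lock, reports each message to the core, releases it, and then re-runs the completion callback in case more buffers arrived meanwhile. A sketch from the matched lines; the per-message poll-state checks that decide what is really pending are simplified away, and msg_read_header()/msg->input are assumed helpers:

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Process TX replies that the polling path left behind. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		scmi_rx_callback(vioch->cinfo,
				 msg_read_header(msg->input), msg);
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Pick up anything that rang the TX virtqueue in the meantime. */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}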
366 struct scmi_vio_channel *vioch = base_cinfo->transport_info; in virtio_get_max_msg() local
368 return vioch->max_msg; in virtio_get_max_msg()
390 struct scmi_vio_channel *channels, *vioch = NULL; in virtio_chan_available() local
399 vioch = &channels[VIRTIO_SCMI_VQ_TX]; in virtio_chan_available()
403 vioch = &channels[VIRTIO_SCMI_VQ_RX]; in virtio_chan_available()
409 return vioch && !vioch->cinfo; in virtio_chan_available()
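virtio_chan_available() maps the requested index onto the TX or RX scmi_vio_channel of the probed device and reports it usable only while no chan_info has claimed it. A sketch under the assumption that scmi_vdev is the driver-global virtio device and that the op signature matches recent kernels; the RX-queue presence check in the omitted lines is reduced to a comment:

static bool virtio_chan_available(struct device_node *of_node, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		/* RX is optional: only valid if the device exposes a second vq. */
		vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	/* Usable only while no SCMI chan_info has claimed it yet. */
	return vioch && !vioch->cinfo;
}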
420 struct scmi_vio_channel *vioch; in virtio_chan_setup() local
427 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; in virtio_chan_setup()
430 if (tx && !vioch->deferred_tx_wq) { in virtio_chan_setup()
433 vioch->deferred_tx_wq = in virtio_chan_setup()
437 if (!vioch->deferred_tx_wq) in virtio_chan_setup()
441 vioch->deferred_tx_wq); in virtio_chan_setup()
445 INIT_WORK(&vioch->deferred_tx_work, in virtio_chan_setup()
449 for (i = 0; i < vioch->max_msg; i++) { in virtio_chan_setup()
471 scmi_finalize_message(vioch, msg); in virtio_chan_setup()
474 scmi_vio_channel_ready(vioch, cinfo); in virtio_chan_setup()
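Channel setup binds the core's chan_info to the vioch: for TX it lazily creates the deferred-TX workqueue and work item, then pre-allocates max_msg message descriptors, each of which scmi_finalize_message() either feeds to the RX virtqueue or parks on the TX free list, before scmi_vio_channel_ready() publishes the channel. A condensed sketch; the workqueue name and flags, the devm allocation, and the omitted payload-buffer setup are assumptions:

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	int i, index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	struct scmi_vio_channel *vioch;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* TX only: a workqueue for replies completed outside the polling path. */
	if (tx && !vioch->deferred_tx_wq) {
		vioch->deferred_tx_wq =
			alloc_workqueue("scmi-vio-tx", WQ_UNBOUND, 0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;
		/* (The real driver ties the workqueue lifetime to the device.) */

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	/* Pre-allocate the message pool; RX buffers go straight to the vq. */
	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;
		/* (Request/response payload buffers are allocated here too.) */

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}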
482 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_chan_free() local
489 virtio_break_device(vioch->vqueue->vdev); in virtio_chan_free()
490 scmi_vio_channel_cleanup_sync(vioch); in virtio_chan_free()
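Teardown marks the whole virtio device broken first, so no further virtqueue processing can start, and only then runs the synchronous cleanup handshake shown earlier. A sketch; the iterator-style signature is assumed from the usual SCMI transport chan_free op:

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break the device first: once broken, virtqueue operations fail, so
	 * no new completions can race with the cleanup below.
	 */
	virtio_break_device(vioch->vqueue->vdev);
	scmi_vio_channel_cleanup_sync(vioch);

	return 0;
}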
498 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_send_message() local
506 if (!scmi_vio_channel_acquire(vioch)) in virtio_send_message()
509 msg = scmi_virtio_get_free_msg(vioch); in virtio_send_message()
511 scmi_vio_channel_release(vioch); in virtio_send_message()
520 spin_lock_irqsave(&vioch->lock, flags); in virtio_send_message()
529 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); in virtio_send_message()
537 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); in virtio_send_message()
539 dev_err(vioch->cinfo->dev, in virtio_send_message()
542 virtqueue_kick(vioch->vqueue); in virtio_send_message()
544 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_send_message()
550 scmi_vio_msg_release(vioch, msg); in virtio_send_message()
551 scmi_vio_msg_release(vioch, msg); in virtio_send_message()
554 scmi_vio_channel_release(vioch); in virtio_send_message()
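Sending a command acquires the channel, grabs a free message descriptor, serializes the xfer into it and adds it to the TX virtqueue under vioch->lock; polled transfers snapshot the callback event index first so virtio_poll_done() has something to poll against, and the error path releases the message once per reference it still holds, which is why lines 550-551 show two consecutive releases. A condensed sketch; the scatterlist sizing helpers, the msg->request/msg->input fields and the extra poll reference are assumptions about the omitted lines:

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out, sg_in;
	struct scatterlist *sgs[2] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	/* Assumed fields/helpers: xfer is serialized into msg->request here. */
	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	if (xfer->hdr.poll_completion) {
		/* Remember the vq event index to poll against later. */
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Extra reference for the polling path (field assumed). */
		refcount_inc(&msg->users);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* One release per reference still held on the failed message. */
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}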
616 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_mark_txdone() local
619 if (!msg || !scmi_vio_channel_acquire(vioch)) in virtio_mark_txdone()
626 if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { in virtio_mark_txdone()
627 scmi_vio_channel_release(vioch); in virtio_mark_txdone()
634 scmi_vio_msg_release(vioch, msg); in virtio_mark_txdone()
639 scmi_vio_channel_release(vioch); in virtio_mark_txdone()
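virtio_mark_txdone() runs when the core is done with a TX transfer: it drops the sender's view of the message and, for polled transfers, arbitrates ownership of the still-referenced buffer with the polling path. A heavily condensed sketch; the xfer->priv stashing and the per-message poll state mentioned in the comments are assumptions about the omitted lines:

static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;	/* assumed: stashed at send time */

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Non-polled messages were already recycled by the completion path,
	 * so the release is skipped for them; for polled ones, drop the TX
	 * reference here and bail out if that was the last one.
	 */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	/*
	 * The polling side still holds a reference: the real driver inspects
	 * a per-message poll state here and either drops that reference too
	 * (the release on matched line 634) or leaves the message for the
	 * deferred worker to reclaim after a poll timeout.
	 */

	scmi_vio_channel_release(vioch);
}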
686 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_poll_done() local
709 if (!scmi_vio_channel_acquire(vioch)) in virtio_poll_done()
713 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); in virtio_poll_done()
715 scmi_vio_channel_release(vioch); in virtio_poll_done()
719 spin_lock_irqsave(&vioch->lock, flags); in virtio_poll_done()
720 virtqueue_disable_cb(vioch->vqueue); in virtio_poll_done()
726 while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { in virtio_poll_done()
760 spin_lock(&vioch->pending_lock); in virtio_poll_done()
762 &vioch->pending_cmds_list); in virtio_poll_done()
763 spin_unlock(&vioch->pending_lock); in virtio_poll_done()
778 pending = !virtqueue_enable_cb(vioch->vqueue); in virtio_poll_done()
780 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); in virtio_poll_done()
781 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); in virtio_poll_done()
784 if (vioch->deferred_tx_wq && (any_prefetched || pending)) in virtio_poll_done()
785 queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); in virtio_poll_done()
787 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_poll_done()
789 scmi_vio_channel_release(vioch); in virtio_poll_done()
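The polling path is the busiest user of vioch: it polls the TX virtqueue against the event index saved at send time, and when something fired it disables callbacks, drains every ready buffer under vioch->lock, immediately recognizing its own reply and parking anything else on pending_cmds_list, then re-enables callbacks (refreshing the poll index if nothing is pending) and kicks the deferred worker when needed. A heavily condensed sketch keeping only the vioch interactions from the matched lines; per-message poll bookkeeping and the early-exit checks are omitted:

static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool ret = false, any_prefetched = false, pending;
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv, *next_msg;	/* msg assumed in xfer->priv */

	if (!scmi_vio_channel_acquire(vioch))
		return true;	/* channel gone: let the core time out the xfer */

	/* Cheap check first: did anything fire since poll_idx was snapshotted? */
	if (!virtqueue_poll(vioch->vqueue, msg->poll_idx)) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/* Drain everything that is ready, not only the message we polled for. */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		if (next_msg == msg) {
			ret = true;	/* our reply has landed */
			continue;
		}

		/* Someone else's reply: park it for the deferred TX worker. */
		any_prefetched = true;
		spin_lock(&vioch->pending_lock);
		list_add_tail(&next_msg->list, &vioch->pending_cmds_list);
		spin_unlock(&vioch->pending_lock);
	}

	/*
	 * Re-enable callbacks; if nothing is pending, refresh the poll index
	 * and poll once more so a racing completion is not missed.
	 */
	pending = !virtqueue_enable_cb(vioch->vqueue);
	if (!pending) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return ret;
}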