// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gunyah.h>
#include <linux/printk.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>

#define mbox_chan_to_msgq(chan) (container_of(chan->mbox, struct gh_msgq, mbox))

static irqreturn_t gh_msgq_rx_irq_handler(int irq, void *data)
{
	struct gh_msgq *msgq = data;
	struct gh_msgq_rx_data rx_data;
	enum gh_error gh_error;
	bool ready = true;

	while (ready) {
		gh_error = gh_hypercall_msgq_recv(msgq->rx_ghrsc->capid,
				&rx_data.data, sizeof(rx_data.data),
				&rx_data.length, &ready);
		if (gh_error != GH_ERROR_OK) {
			if (gh_error != GH_ERROR_MSGQUEUE_EMPTY)
				dev_warn(msgq->mbox.dev, "Failed to receive data: %d\n", gh_error);
			break;
		}
		if (likely(gh_msgq_chan(msgq)->cl))
			mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data);
	}

	return IRQ_HANDLED;
}

/* Fired when the message queue transitions from "full" to "space available" to send messages */
static irqreturn_t gh_msgq_tx_irq_handler(int irq, void *data)
{
	struct gh_msgq *msgq = data;

	mbox_chan_txdone(gh_msgq_chan(msgq), 0);

	return IRQ_HANDLED;
}

/* Fired after sending a message when the hypercall told us there was more space available. */
static void gh_msgq_txdone_tasklet(struct tasklet_struct *tasklet)
{
	struct gh_msgq *msgq = container_of(tasklet, struct gh_msgq, txdone_tasklet);

	mbox_chan_txdone(gh_msgq_chan(msgq), msgq->last_ret);
}

static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
{
	struct gh_msgq *msgq = mbox_chan_to_msgq(chan);
	struct gh_msgq_tx_data *msgq_data = data;
	u64 tx_flags = 0;
	enum gh_error gh_error;
	bool ready;

	if (!msgq->tx_ghrsc)
		return -EOPNOTSUPP;

	if (msgq_data->push)
		tx_flags |= GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH;

	gh_error = gh_hypercall_msgq_send(msgq->tx_ghrsc->capid, msgq_data->length, msgq_data->data,
						tx_flags, &ready);

	/*
	 * Unlikely because Linux tracks the state of the msgq and should not
	 * try to send a message when the msgq is full.
	 */
	if (unlikely(gh_error == GH_ERROR_MSGQUEUE_FULL))
		return -EAGAIN;

	/*
	 * Propagate all other errors to the client. If we returned the error
	 * to the mailbox framework, then no other messages could be sent and
	 * nobody would know to retry this message.
	 */
	msgq->last_ret = gh_error_remap(gh_error);

	/*
	 * This message was successfully sent, but the message queue isn't
	 * ready to accept more messages because it's now full. The mailbox
	 * framework requires that we only report that a message was
	 * transmitted when we're ready to transmit another one. We'll get
	 * that in the form of the tx IRQ once the other side starts to drain
	 * the msgq.
	 */
	if (gh_error == GH_ERROR_OK) {
		if (!ready)
			return 0;
	} else {
		dev_err(msgq->mbox.dev, "Failed to send data: %d (%d)\n", gh_error, msgq->last_ret);
	}

	/*
	 * We can send more messages. The mailbox framework requires that tx
	 * done happens asynchronously to sending the message. Gunyah message
	 * queues tell us right away on the hypercall return whether we can
	 * send more messages. To work around this, defer the txdone to a
	 * tasklet.
	 */
	tasklet_schedule(&msgq->txdone_tasklet);

	return 0;
}

static const struct mbox_chan_ops gh_msgq_ops = {
	.send_data = gh_msgq_send_data,
};

/**
 * gh_msgq_init() - Initialize a Gunyah message queue with an mbox_client
 * @parent: device parent used for the mailbox controller
 * @msgq: Pointer to the gh_msgq to initialize
 * @cl: A mailbox client to bind to the mailbox channel that the message queue creates
 * @tx_ghrsc: optional, the transmission side of the message queue
 * @rx_ghrsc: optional, the receiving side of the message queue
 *
 * At least one of tx_ghrsc and rx_ghrsc must not be NULL. Most message queue use cases come with
 * a pair of message queues to facilitate bidirectional communication. When tx_ghrsc is set,
 * the client can send messages with mbox_send_message(gh_msgq_chan(msgq), msg). When rx_ghrsc
 * is set, the mbox_client must register an .rx_callback() and the message queue driver will
 * deliver all available messages upon receiving the RX ready interrupt. The messages should be
 * consumed or copied by the client right away as the gh_msgq_rx_data will be replaced/destroyed
 * after the callback.
 *
 * Return: 0 on success, negative errno otherwise
 */
int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client *cl,
		 struct gh_resource *tx_ghrsc, struct gh_resource *rx_ghrsc)
{
	int ret;

	/* Must have at least one of tx_ghrsc or rx_ghrsc, and they must be the right resource types */
	if ((!tx_ghrsc && !rx_ghrsc) ||
	    (tx_ghrsc && tx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_TX) ||
	    (rx_ghrsc && rx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_RX))
		return -EINVAL;

	msgq->mbox.dev = parent;
	msgq->mbox.ops = &gh_msgq_ops;
	msgq->mbox.num_chans = 1;
	msgq->mbox.txdone_irq = true;
	msgq->mbox.chans = &msgq->mbox_chan;

	ret = mbox_controller_register(&msgq->mbox);
	if (ret)
		return ret;

	ret = mbox_bind_client(gh_msgq_chan(msgq), cl);
	if (ret)
		goto err_mbox;

	if (tx_ghrsc) {
		msgq->tx_ghrsc = tx_ghrsc;

		ret = request_irq(msgq->tx_ghrsc->irq, gh_msgq_tx_irq_handler, 0, "gh_msgq_tx",
				msgq);
		if (ret)
			goto err_tx_ghrsc;

		enable_irq_wake(msgq->tx_ghrsc->irq);

		tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
	}

	if (rx_ghrsc) {
		msgq->rx_ghrsc = rx_ghrsc;

		ret = request_threaded_irq(msgq->rx_ghrsc->irq, NULL, gh_msgq_rx_irq_handler,
						IRQF_ONESHOT, "gh_msgq_rx", msgq);
		if (ret)
			goto err_tx_irq;

		enable_irq_wake(msgq->rx_ghrsc->irq);
	}

	return 0;
err_tx_irq:
	if (msgq->tx_ghrsc)
		free_irq(msgq->tx_ghrsc->irq, msgq);

	msgq->rx_ghrsc = NULL;
err_tx_ghrsc:
	msgq->tx_ghrsc = NULL;
err_mbox:
	mbox_controller_unregister(&msgq->mbox);
	return ret;
}
EXPORT_SYMBOL_GPL(gh_msgq_init);
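
/*
 * Example usage (an illustrative sketch, not part of this driver): a client
 * provides an mbox_client with an .rx_callback(), initializes its gh_msgq,
 * and sends messages on the channel returned by gh_msgq_chan(). The names
 * my_rx_callback, my_dev, my_msgq, tx and rx below are hypothetical.
 *
 *	static void my_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		struct gh_msgq_rx_data *rx_data = mssg;
 *
 *		// Consume or copy rx_data->data (rx_data->length bytes) now;
 *		// the buffer is reused after this callback returns.
 *	}
 *
 *	struct mbox_client cl = {
 *		.dev = my_dev,
 *		.rx_callback = my_rx_callback,
 *	};
 *	struct gh_msgq_tx_data *msg = ...; // prepared by the client
 *	int ret;
 *
 *	ret = gh_msgq_init(my_dev, &my_msgq, &cl, tx, rx);
 *	if (!ret)
 *		ret = mbox_send_message(gh_msgq_chan(&my_msgq), msg);
 */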
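
/**
 * gh_msgq_remove() - Tear down a message queue initialized with gh_msgq_init()
 * @msgq: The Gunyah message queue to tear down
 *
 * Frees the mailbox channel, releases any IRQs requested by gh_msgq_init(),
 * kills the txdone tasklet, and unregisters the mailbox controller.
 */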
void gh_msgq_remove(struct gh_msgq *msgq)
{
	mbox_free_channel(gh_msgq_chan(msgq));

	if (msgq->rx_ghrsc)
		free_irq(msgq->rx_ghrsc->irq, msgq);

	if (msgq->tx_ghrsc) {
		tasklet_kill(&msgq->txdone_tasklet);
		free_irq(msgq->tx_ghrsc->irq, msgq);
	}

	mbox_controller_unregister(&msgq->mbox);

	msgq->rx_ghrsc = NULL;
	msgq->tx_ghrsc = NULL;
}
EXPORT_SYMBOL_GPL(gh_msgq_remove);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Gunyah Message Queue Driver");