• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023 MediaTek Inc.
4  */
5 
6 #include <linux/eventfd.h>
7 #include <linux/file.h>
8 #include <linux/syscalls.h>
9 #include <linux/gzvm.h>
10 #include <linux/soc/mediatek/gzvm_drv.h>
11 #include <linux/wait.h>
12 #include <linux/poll.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 
/**
 * struct gzvm_ioevent - One registered guest-mmio-write -> eventfd binding.
 * @list: Node in gzvm->ioevents; protected by gzvm->ioevent_lock.
 * @addr: Guest mmio address that triggers this event (exact match).
 * @len: Access width in bytes (0, 1, 2, 4 or 8); 0 matches any width.
 * @evt_ctx: eventfd context signalled when a matching write occurs.
 * @datamatch: Value the written data must equal when @wildcard is false.
 * @wildcard: True to signal regardless of the written data.
 */
struct gzvm_ioevent {
	struct list_head list;
	__u64 addr;
	__u32 len;
	struct eventfd_ctx  *evt_ctx;
	__u64 datamatch;
	bool wildcard;
};
24 
25 /**
26  * ioeventfd_check_collision() - Check collision assumes gzvm->ioevent_lock held.
27  * @gzvm: Pointer to gzvm.
28  * @p: Pointer to gzvm_ioevent.
29  *
30  * Return:
31  * * true			- collision found
32  * * false			- no collision
33  */
ioeventfd_check_collision(struct gzvm * gzvm,struct gzvm_ioevent * p)34 static bool ioeventfd_check_collision(struct gzvm *gzvm, struct gzvm_ioevent *p)
35 {
36 	struct gzvm_ioevent *_p;
37 
38 	list_for_each_entry(_p, &gzvm->ioevents, list) {
39 		if (_p->addr == p->addr &&
40 		    (!_p->len || !p->len ||
41 		     (_p->len == p->len &&
42 		      (_p->wildcard || p->wildcard ||
43 		       _p->datamatch == p->datamatch))))
44 			return true;
45 		if (p->addr >= _p->addr && p->addr < _p->addr + _p->len)
46 			return true;
47 	}
48 
49 	return false;
50 }
51 
gzvm_ioevent_release(struct gzvm_ioevent * p)52 static void gzvm_ioevent_release(struct gzvm_ioevent *p)
53 {
54 	eventfd_ctx_put(p->evt_ctx);
55 	list_del(&p->list);
56 	kfree(p);
57 }
58 
gzvm_ioevent_in_range(struct gzvm_ioevent * p,__u64 addr,int len,const void * val)59 static bool gzvm_ioevent_in_range(struct gzvm_ioevent *p, __u64 addr, int len,
60 				  const void *val)
61 {
62 	u64 _val;
63 
64 	if (addr != p->addr)
65 		/* address must be precise for a hit */
66 		return false;
67 
68 	if (!p->len)
69 		/* length = 0 means only look at the address, so always a hit */
70 		return true;
71 
72 	if (len != p->len)
73 		/* address-range must be precise for a hit */
74 		return false;
75 
76 	if (p->wildcard)
77 		/* all else equal, wildcard is always a hit */
78 		return true;
79 
80 	/* otherwise, we have to actually compare the data */
81 
82 	WARN_ON_ONCE(!IS_ALIGNED((unsigned long)val, len));
83 
84 	switch (len) {
85 	case 1:
86 		_val = *(u8 *)val;
87 		break;
88 	case 2:
89 		_val = *(u16 *)val;
90 		break;
91 	case 4:
92 		_val = *(u32 *)val;
93 		break;
94 	case 8:
95 		_val = *(u64 *)val;
96 		break;
97 	default:
98 		return false;
99 	}
100 
101 	return _val == p->datamatch;
102 }
103 
gzvm_deassign_ioeventfd(struct gzvm * gzvm,struct gzvm_ioeventfd * args)104 static int gzvm_deassign_ioeventfd(struct gzvm *gzvm,
105 				   struct gzvm_ioeventfd *args)
106 {
107 	struct gzvm_ioevent *p, *tmp;
108 	struct eventfd_ctx *evt_ctx;
109 	int ret = -ENOENT;
110 	bool wildcard;
111 
112 	evt_ctx = eventfd_ctx_fdget(args->fd);
113 	if (IS_ERR(evt_ctx))
114 		return PTR_ERR(evt_ctx);
115 
116 	wildcard = !(args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH);
117 
118 	mutex_lock(&gzvm->ioevent_lock);
119 	list_for_each_entry_safe(p, tmp, &gzvm->ioevents, list) {
120 		if (p->evt_ctx != evt_ctx  ||
121 		    p->addr != args->addr  ||
122 		    p->len != args->len ||
123 		    p->wildcard != wildcard)
124 			continue;
125 
126 		if (!p->wildcard && p->datamatch != args->datamatch)
127 			continue;
128 
129 		gzvm_ioevent_release(p);
130 		ret = 0;
131 		break;
132 	}
133 
134 	mutex_unlock(&gzvm->ioevent_lock);
135 
136 	/* got in the front of this function */
137 	eventfd_ctx_put(evt_ctx);
138 
139 	return ret;
140 }
141 
gzvm_assign_ioeventfd(struct gzvm * gzvm,struct gzvm_ioeventfd * args)142 static int gzvm_assign_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
143 {
144 	struct eventfd_ctx *evt_ctx;
145 	struct gzvm_ioevent *evt;
146 	int ret;
147 
148 	evt_ctx = eventfd_ctx_fdget(args->fd);
149 	if (IS_ERR(evt_ctx))
150 		return PTR_ERR(evt_ctx);
151 
152 	evt = kmalloc(sizeof(*evt), GFP_KERNEL);
153 	if (!evt)
154 		return -ENOMEM;
155 	*evt = (struct gzvm_ioevent) {
156 		.addr = args->addr,
157 		.len = args->len,
158 		.evt_ctx = evt_ctx,
159 	};
160 	if (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH) {
161 		evt->datamatch = args->datamatch;
162 		evt->wildcard = false;
163 	} else {
164 		evt->wildcard = true;
165 	}
166 
167 	mutex_lock(&gzvm->ioevent_lock);
168 	if (ioeventfd_check_collision(gzvm, evt)) {
169 		ret = -EEXIST;
170 		mutex_unlock(&gzvm->ioevent_lock);
171 		goto err_free;
172 	}
173 
174 	list_add_tail(&evt->list, &gzvm->ioevents);
175 	mutex_unlock(&gzvm->ioevent_lock);
176 
177 	return 0;
178 
179 err_free:
180 	kfree(evt);
181 	eventfd_ctx_put(evt_ctx);
182 	return ret;
183 }
184 
185 /**
186  * gzvm_ioeventfd_check_valid() - Check user arguments is valid.
187  * @args: Pointer to gzvm_ioeventfd.
188  *
189  * Return:
190  * * true if user arguments are valid.
191  * * false if user arguments are invalid.
192  */
gzvm_ioeventfd_check_valid(struct gzvm_ioeventfd * args)193 static bool gzvm_ioeventfd_check_valid(struct gzvm_ioeventfd *args)
194 {
195 	/* must be natural-word sized, or 0 to ignore length */
196 	switch (args->len) {
197 	case 0:
198 	case 1:
199 	case 2:
200 	case 4:
201 	case 8:
202 		break;
203 	default:
204 		return false;
205 	}
206 
207 	/* check for range overflow */
208 	if (args->addr + args->len < args->addr)
209 		return false;
210 
211 	/* check for extra flags that we don't understand */
212 	if (args->flags & ~GZVM_IOEVENTFD_VALID_FLAG_MASK)
213 		return false;
214 
215 	/* ioeventfd with no length can't be combined with DATAMATCH */
216 	if (!args->len && (args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH))
217 		return false;
218 
219 	/* gzvm does not support pio bus ioeventfd */
220 	if (args->flags & GZVM_IOEVENTFD_FLAG_PIO)
221 		return false;
222 
223 	return true;
224 }
225 
226 /**
227  * gzvm_ioeventfd() - Register ioevent to ioevent list.
228  * @gzvm: Pointer to gzvm.
229  * @args: Pointer to gzvm_ioeventfd.
230  *
231  * Return:
232  * * 0			- Success.
233  * * Negative		- Failure.
234  */
gzvm_ioeventfd(struct gzvm * gzvm,struct gzvm_ioeventfd * args)235 int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
236 {
237 	if (gzvm_ioeventfd_check_valid(args) == false)
238 		return -EINVAL;
239 
240 	if (args->flags & GZVM_IOEVENTFD_FLAG_DEASSIGN)
241 		return gzvm_deassign_ioeventfd(gzvm, args);
242 	return gzvm_assign_ioeventfd(gzvm, args);
243 }
244 
245 /**
246  * gzvm_ioevent_write() - Travers this vm's registered ioeventfd to see if
247  *			  need notifying it.
248  * @vcpu: Pointer to vcpu.
249  * @addr: mmio address.
250  * @len: mmio size.
251  * @val: Pointer to void.
252  *
253  * Return:
254  * * true if this io is already sent to ioeventfd's listener.
255  * * false if we cannot find any ioeventfd registering this mmio write.
256  */
gzvm_ioevent_write(struct gzvm_vcpu * vcpu,__u64 addr,int len,const void * val)257 bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
258 			const void *val)
259 {
260 	struct gzvm_ioevent *e;
261 
262 	mutex_lock(&vcpu->gzvm->ioevent_lock);
263 	list_for_each_entry(e, &vcpu->gzvm->ioevents, list) {
264 		if (gzvm_ioevent_in_range(e, addr, len, val)) {
265 			eventfd_signal(e->evt_ctx);
266 			mutex_unlock(&vcpu->gzvm->ioevent_lock);
267 			return true;
268 		}
269 	}
270 
271 	mutex_unlock(&vcpu->gzvm->ioevent_lock);
272 	return false;
273 }
274 
gzvm_init_ioeventfd(struct gzvm * gzvm)275 int gzvm_init_ioeventfd(struct gzvm *gzvm)
276 {
277 	INIT_LIST_HEAD(&gzvm->ioevents);
278 	mutex_init(&gzvm->ioevent_lock);
279 
280 	return 0;
281 }
282 
gzvm_vm_ioeventfd_release(struct gzvm * gzvm)283 void gzvm_vm_ioeventfd_release(struct gzvm *gzvm)
284 {
285 	struct gzvm_ioevent *p, *tmp;
286 
287 	mutex_lock(&gzvm->ioevent_lock);
288 	list_for_each_entry_safe(p, tmp, &gzvm->ioevents, list)
289 		gzvm_ioevent_release(p);
290 	mutex_unlock(&gzvm->ioevent_lock);
291 }
292