// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "drm.h"
#include "uapi.h"

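/*
 * Release a channel mapping once its last reference is dropped: undo the DMA
 * mapping (if one was created), unpin and release the underlying host1x BO,
 * and free the mapping structure itself.
 */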
static void tegra_drm_mapping_release(struct kref *ref)
{
	struct tegra_drm_mapping *mapping =
		container_of(ref, struct tegra_drm_mapping, ref);

	if (mapping->sgt)
		dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
				  DMA_ATTR_SKIP_CPU_SYNC);

	host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
	host1x_bo_put(mapping->bo);

	kfree(mapping);
}

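/* Drop a reference to a mapping; the final put triggers tegra_drm_mapping_release(). */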
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
	kref_put(&mapping->ref, tegra_drm_mapping_release);
}

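/*
 * Tear down a channel context: drop the context's reference on every mapping
 * still registered in its XArray, release the host1x channel and free the
 * context.
 */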
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
	struct tegra_drm_mapping *mapping;
	unsigned long id;

	xa_for_each(&context->mappings, id, mapping)
		tegra_drm_mapping_put(mapping);

	xa_destroy(&context->mappings);

	host1x_channel_put(context->channel);

	kfree(context);
}

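/*
 * Called when a DRM file is closed: close all channel contexts opened through
 * it and release all syncpoints it allocated.
 */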
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
	struct tegra_drm_context *context;
	struct host1x_syncpt *sp;
	unsigned long id;

	xa_for_each(&file->contexts, id, context)
		tegra_drm_channel_context_close(context);

	xa_for_each(&file->syncpoints, id, sp)
		host1x_syncpt_put(sp);

	xa_destroy(&file->contexts);
	xa_destroy(&file->syncpoints);
}

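/* Look up the tegra_drm_client that registered the given host1x class. */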
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
	struct tegra_drm_client *client;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == class)
			return client;

	return NULL;
}

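/*
 * DRM_TEGRA_CHANNEL_OPEN: open a channel context for the engine identified by
 * args->host1x_class. Either takes a reference on the client's shared channel
 * or requests a dedicated one, registers the context in the file's XArray and
 * reports the engine version and capabilities back to userspace.
 */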
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}

	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_channel;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}

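/*
 * DRM_TEGRA_CHANNEL_CLOSE: remove a context from the file's XArray under the
 * file lock, then tear it down outside the lock.
 */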
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_close *args = data;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, args->context);

	mutex_unlock(&fpriv->lock);

	tegra_drm_channel_context_close(context);

	return 0;
}

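/*
 * DRM_TEGRA_CHANNEL_MAP: pin a GEM object into the engine's address space.
 * If the client manages its own IOMMU domain the pin provides the IOVA
 * directly; otherwise the object is pinned and DMA-mapped with a direction
 * derived from the requested read/write flags. The resulting mapping is
 * registered in the context's XArray and its ID returned to userspace.
 */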
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	int err = 0;

	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

	mapping->dev = context->client->base.dev;
	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

	if (context->client->base.group) {
		/* IOMMU domain managed directly using IOMMU API */
		host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
	} else {
		switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
		case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
			mapping->direction = DMA_BIDIRECTIONAL;
			break;

		case DRM_TEGRA_CHANNEL_MAP_WRITE:
			mapping->direction = DMA_FROM_DEVICE;
			break;

		case DRM_TEGRA_CHANNEL_MAP_READ:
			mapping->direction = DMA_TO_DEVICE;
			break;

		default:
			err = -EINVAL;
			goto put_gem;
		}

		mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
		if (IS_ERR(mapping->sgt)) {
			err = PTR_ERR(mapping->sgt);
			goto put_gem;
		}

		err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
				      DMA_ATTR_SKIP_CPU_SYNC);
		if (err)
			goto unpin;

		mapping->iova = sg_dma_address(mapping->sgt->sgl);
	}

	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unmap;

	mutex_unlock(&fpriv->lock);

	return 0;

unmap:
	if (mapping->sgt) {
		dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
				  DMA_ATTR_SKIP_CPU_SYNC);
	}
unpin:
	host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

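/*
 * DRM_TEGRA_CHANNEL_UNMAP: remove a mapping from its context under the file
 * lock and drop the reference held by the XArray.
 */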
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_unmap *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = xa_erase(&context->mappings, args->mapping);

	mutex_unlock(&fpriv->lock);

	if (!mapping)
		return -EINVAL;

	tegra_drm_mapping_put(mapping);
	return 0;
}

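/*
 * DRM_TEGRA_SYNCPOINT_ALLOCATE: allocate a client-managed host1x syncpoint and
 * track it in the file's XArray, keyed by its hardware ID.
 */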
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;
	int err;

	if (args->id)
		return -EINVAL;

	sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
	if (!sp)
		return -EBUSY;

	args->id = host1x_syncpt_id(sp);

	err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
	if (err) {
		host1x_syncpt_put(sp);
		return err;
	}

	return 0;
}

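/*
 * DRM_TEGRA_SYNCPOINT_FREE: remove a syncpoint from the file's XArray and drop
 * the reference taken at allocation time.
 */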
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;

	mutex_lock(&fpriv->lock);
	sp = xa_erase(&fpriv->syncpoints, args->id);
	mutex_unlock(&fpriv->lock);

	if (!sp)
		return -EINVAL;

	host1x_syncpt_put(sp);

	return 0;
}

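/*
 * DRM_TEGRA_SYNCPOINT_WAIT: wait until the given syncpoint reaches the
 * requested threshold or the absolute timeout expires, returning the current
 * syncpoint value to userspace.
 */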
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct drm_tegra_syncpoint_wait *args = data;
	signed long timeout_jiffies;
	struct host1x_syncpt *sp;

	if (args->padding != 0)
		return -EINVAL;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}