• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 The Linux Foundation. All rights reserved.
3  */
4 
5 #include <linux/kref.h>
6 #include <linux/uaccess.h>
7 
8 #include "msm_gpu.h"
9 
msm_submitqueue_destroy(struct kref * kref)10 void msm_submitqueue_destroy(struct kref *kref)
11 {
12 	struct msm_gpu_submitqueue *queue = container_of(kref,
13 		struct msm_gpu_submitqueue, ref);
14 
15 	msm_file_private_put(queue->ctx);
16 
17 	kfree(queue);
18 }
19 
/*
 * msm_submitqueue_get() - look up a submitqueue by id.
 * @ctx: the per-file context owning the queue list (may be NULL)
 * @id:  queue id to search for
 *
 * Walks @ctx->submitqueues under the read lock.  On success a reference is
 * taken on the matching queue; the caller must release it with
 * msm_submitqueue_put().
 *
 * Return: the referenced queue, or NULL if @ctx is NULL or no queue matches.
 */
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			/* Take a ref before dropping the lock so the queue
			 * can't be freed out from under the caller.
			 */
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}
42 
msm_submitqueue_close(struct msm_file_private * ctx)43 void msm_submitqueue_close(struct msm_file_private *ctx)
44 {
45 	struct msm_gpu_submitqueue *entry, *tmp;
46 
47 	if (!ctx)
48 		return;
49 
50 	/*
51 	 * No lock needed in close and there won't
52 	 * be any more user ioctls coming our way
53 	 */
54 	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
55 		list_del(&entry->node);
56 		msm_submitqueue_put(entry);
57 	}
58 }
59 
/*
 * msm_submitqueue_create() - create a new submitqueue for a file context.
 * @drm:   the drm device
 * @ctx:   the per-file context that will own the queue (may be NULL)
 * @prio:  requested ring priority; validated against nr_rings only when a
 *         GPU is present
 * @flags: user-supplied queue flags, stored verbatim
 * @id:    optional out-parameter receiving the new queue id
 *
 * The new queue starts with one reference (held by the context's list) and
 * holds a reference on @ctx for its lifetime.
 *
 * Return: 0 on success, -ENODEV if @ctx is NULL, -ENOMEM on allocation
 * failure, or -EINVAL if @prio is out of range.
 */
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (priv->gpu) {
		if (prio >= priv->gpu->nr_rings) {
			kfree(queue);
			return -EINVAL;
		}

		queue->prio = prio;
	}

	/* queueid assignment and list insertion must be atomic wrt lookups */
	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}
100 
msm_submitqueue_init(struct drm_device * drm,struct msm_file_private * ctx)101 int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
102 {
103 	struct msm_drm_private *priv = drm->dev_private;
104 	int default_prio;
105 
106 	if (!ctx)
107 		return 0;
108 
109 	/*
110 	 * Select priority 2 as the "default priority" unless nr_rings is less
111 	 * than 2 and then pick the lowest pirority
112 	 */
113 	default_prio = priv->gpu ?
114 		clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
115 
116 	INIT_LIST_HEAD(&ctx->submitqueues);
117 
118 	rwlock_init(&ctx->queuelock);
119 
120 	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
121 }
122 
msm_submitqueue_query_faults(struct msm_gpu_submitqueue * queue,struct drm_msm_submitqueue_query * args)123 static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
124 		struct drm_msm_submitqueue_query *args)
125 {
126 	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
127 	int ret;
128 
129 	/* If a zero length was passed in, return the data size we expect */
130 	if (!args->len) {
131 		args->len = sizeof(queue->faults);
132 		return 0;
133 	}
134 
135 	/* Set the length to the actual size of the data */
136 	args->len = size;
137 
138 	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
139 
140 	return ret ? -EFAULT : 0;
141 }
142 
msm_submitqueue_query(struct drm_device * drm,struct msm_file_private * ctx,struct drm_msm_submitqueue_query * args)143 int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
144 		struct drm_msm_submitqueue_query *args)
145 {
146 	struct msm_gpu_submitqueue *queue;
147 	int ret = -EINVAL;
148 
149 	if (args->pad)
150 		return -EINVAL;
151 
152 	queue = msm_submitqueue_get(ctx, args->id);
153 	if (!queue)
154 		return -ENOENT;
155 
156 	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
157 		ret = msm_submitqueue_query_faults(queue, args);
158 
159 	msm_submitqueue_put(queue);
160 
161 	return ret;
162 }
163 
/*
 * msm_submitqueue_remove() - destroy a user-created submitqueue by id.
 * @ctx: the per-file context owning the queue (may be NULL)
 * @id:  id of the queue to remove; id 0 is the default queue and is
 *       protected from removal
 *
 * Unlinks the queue from @ctx->submitqueues under the write lock and drops
 * the list's reference; the queue is freed once all other references (e.g.
 * from in-flight lookups) are gone.
 *
 * Return: 0 on success (or if @ctx is NULL), -ENOENT if @id is 0 or no
 * queue matches.
 */
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			/* Drop the list's reference outside the lock */
			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}
193 
194