/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "intel_gem.h"
#include "drm-uapi/i915_drm.h"

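/* Probes whether the kernel's syncobj wait ioctl understands the
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag, i.e. whether we can wait on a
 * syncobj before anything has been submitted to it.
 */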
bool
intel_gem_supports_syncobj_wait(int fd)
{
   int ret;

   struct drm_syncobj_create create = {
      .flags = 0,
   };
   ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret)
      return false;

   uint32_t syncobj = create.handle;

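   /* Wait on the fresh, unsignaled syncobj with a zero timeout.  A kernel
    * that understands WAIT_FOR_SUBMIT has nothing to wait for and times out;
    * an older kernel rejects the request instead.
    */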
   struct drm_syncobj_wait wait = {
      .handles = (uint64_t)(uintptr_t)&syncobj,
      .count_handles = 1,
      .timeout_nsec = 0,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };
   ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);

   struct drm_syncobj_destroy destroy = {
      .handle = syncobj,
   };
   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);

   /* If it timed out, then we have the ioctl and it supports the
    * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
    */
   return ret == -1 && errno == ETIME;
}

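/* Returns the number of engines of the given class in the engine topology
 * reported by DRM_I915_QUERY_ENGINE_INFO.
 */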
int
intel_gem_count_engines(const struct drm_i915_query_engine_info *info,
                        enum drm_i915_gem_engine_class engine_class)
{
   assert(info != NULL);
   int count = 0;
   for (int i = 0; i < info->num_engines; i++) {
      if (info->engines[i].engine.engine_class == engine_class)
         count++;
   }
   return count;
}

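/* Creates a GEM context whose engine map has num_engines entries, entry i
 * being a physical engine of class engine_classes[i].  Multiple requests for
 * the same class are spread round-robin across its instances.  Returns the
 * new context id, or -1 if a requested class has no engines or the ioctl
 * fails.
 */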
int
intel_gem_create_context_engines(int fd,
                                 const struct drm_i915_query_engine_info *info,
                                 int num_engines, uint16_t *engine_classes)
{
   assert(info != NULL);
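   /* The engines parameter has the layout of struct i915_context_param_engines:
    * a __u64 extensions field followed by an array of { class, instance }
    * __u16 pairs, one pair per requested engine.
    */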
   const size_t engine_inst_sz = 2 * sizeof(__u16); /* 1 class, 1 instance */
   const size_t engines_param_size =
      sizeof(__u64) /* extensions */ + num_engines * engine_inst_sz;

   void *engines_param = malloc(engines_param_size);
   assert(engines_param);
   *(__u64*)engines_param = 0;
   __u16 *class_inst_ptr = (__u16*)(((__u64*)engines_param) + 1);

   /* For each type of drm_i915_gem_engine_class of interest, we keep track of
    * the previous engine instance used.
    */
   int last_engine_idx[] = {
      [I915_ENGINE_CLASS_RENDER] = -1,
      [I915_ENGINE_CLASS_COPY] = -1,
      [I915_ENGINE_CLASS_COMPUTE] = -1,
   };

   int i915_engine_counts[] = {
      [I915_ENGINE_CLASS_RENDER] =
         intel_gem_count_engines(info, I915_ENGINE_CLASS_RENDER),
      [I915_ENGINE_CLASS_COPY] =
         intel_gem_count_engines(info, I915_ENGINE_CLASS_COPY),
      [I915_ENGINE_CLASS_COMPUTE] =
         intel_gem_count_engines(info, I915_ENGINE_CLASS_COMPUTE),
   };

   /* For each queue, we look for the next instance that matches the class we
    * need.
    */
   for (int i = 0; i < num_engines; i++) {
      uint16_t engine_class = engine_classes[i];
      assert(engine_class == I915_ENGINE_CLASS_RENDER ||
             engine_class == I915_ENGINE_CLASS_COPY ||
             engine_class == I915_ENGINE_CLASS_COMPUTE);
      if (i915_engine_counts[engine_class] <= 0) {
         free(engines_param);
         return -1;
      }

      /* Run through the engines reported by the kernel looking for the next
       * matching instance. We loop in case we want to create multiple
       * contexts on an engine instance.
       */
      int engine_instance = -1;
      for (int j = 0; j < info->num_engines; j++) {
         int *idx = &last_engine_idx[engine_class];
         if (++(*idx) >= info->num_engines)
            *idx = 0;
         if (info->engines[*idx].engine.engine_class == engine_class) {
            engine_instance = info->engines[*idx].engine.engine_instance;
            break;
         }
      }
      if (engine_instance < 0) {
         free(engines_param);
         return -1;
      }

      *class_inst_ptr++ = engine_class;
      *class_inst_ptr++ = engine_instance;
   }

   assert((uintptr_t)engines_param + engines_param_size ==
          (uintptr_t)class_inst_ptr);

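   /* Hand the engine map to the kernel at context creation time through the
    * I915_CONTEXT_CREATE_EXT_SETPARAM extension, so the context comes into
    * existence with exactly these engines.
    */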
   struct drm_i915_gem_context_create_ext_setparam set_engines = {
      .base = {
         .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
      },
      .param = {
         .param = I915_CONTEXT_PARAM_ENGINES,
         .value = (uintptr_t)engines_param,
         .size = engines_param_size,
      }
   };
   struct drm_i915_gem_context_create_ext create = {
      .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
      .extensions = (uintptr_t)&set_engines,
   };
   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
   free(engines_param);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}
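/* Example usage (an illustrative sketch, not part of this file): query the
 * engine topology with DRM_I915_QUERY_ENGINE_INFO using the usual two-pass
 * pattern (the first call reports the required buffer size, the second fills
 * it in), then create a context with one render and one copy engine.  Error
 * handling is omitted for brevity.
 *
 *    struct drm_i915_query_item item = {
 *       .query_id = DRM_I915_QUERY_ENGINE_INFO,
 *    };
 *    struct drm_i915_query query = {
 *       .num_items = 1,
 *       .items_ptr = (uintptr_t)&item,
 *    };
 *    intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  // sets item.length
 *
 *    struct drm_i915_query_engine_info *engine_info = calloc(1, item.length);
 *    item.data_ptr = (uintptr_t)engine_info;
 *    intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &query);  // fills engine_info
 *
 *    uint16_t classes[] = { I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY };
 *    int ctx_id = intel_gem_create_context_engines(fd, engine_info, 2, classes);
 *    free(engine_info);
 */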