// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_engine_pm.h"
#include "selftests/igt_flush_test.h"

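/*
 * Read back a relocation value from the scratch map. The entry may be 32
 * or 64 bits wide depending on the platform, so copy a full u64 out of
 * the u32-indexed map and let the caller's mask select the valid bits;
 * memcpy() avoids a potentially misaligned 64-bit load.
 */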
static u64 read_reloc(const u32 *map, int x, const u64 mask)
{
	u64 reloc;

	memcpy(&reloc, &map[x], sizeof(reloc));
	return reloc & mask;
}

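/*
 * Emit GPU relocations into a scratch object at a qword-aligned offset,
 * a non-qword-aligned offset, and an offset that forces the relocations
 * to spill into a second command buffer, then wait for the request and
 * check that every value landed where expected.
 */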
static int __igt_gpu_reloc(struct i915_execbuffer *eb,
			   struct drm_i915_gem_object *obj)
{
	const unsigned int offsets[] = { 8, 3, 0 };
	const u64 mask =
		GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
	const u32 *map = page_mask_bits(obj->mm.mapping);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;
	int i;

	vma = i915_vma_instance(obj, eb->context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_lock(obj, &eb->ww);
	if (err)
		return err;

	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	/* 8-byte aligned relocation entry */
	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
	if (err <= 0)
		goto reloc_err;

	/* Non-8-byte-aligned relocation entry */
	err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
	if (err <= 0)
		goto reloc_err;

	/*
	 * Pad the rest of the command page with MI_NOOP so that the next
	 * relocation entry cannot fit and must start a new batch.
	 */
	i = PAGE_SIZE / sizeof(u32) - 1;
	i -= eb->reloc_cache.rq_size;
	memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
		 MI_NOOP, i);
	eb->reloc_cache.rq_size += i;

	/* Force the third relocation into the next batch */
	err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
	if (err <= 0)
		goto reloc_err;

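	/*
	 * Hold a reference to the request before flushing, since
	 * reloc_gpu_flush() submits the batch and clears rq from the
	 * relocation cache.
	 */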
	GEM_BUG_ON(!eb->reloc_cache.rq);
	rq = i915_request_get(eb->reloc_cache.rq);
	reloc_gpu_flush(eb, &eb->reloc_cache);
	GEM_BUG_ON(eb->reloc_cache.rq);

	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
	if (err) {
		intel_gt_set_wedged(eb->engine->gt);
		goto put_rq;
	}

	if (!i915_request_completed(rq)) {
		pr_err("%s: did not wait for relocations!\n", eb->engine->name);
		err = -EINVAL;
		goto put_rq;
	}

	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 reloc = read_reloc(map, offsets[i], mask);

		if (reloc != i) {
			pr_err("%s[%d]: map[%d] %llx != %x\n",
			       eb->engine->name, i, offsets[i], reloc, i);
			err = -EINVAL;
		}
	}
	if (err)
		igt_hexdump(map, 4096);

put_rq:
	i915_request_put(rq);
unpin_vma:
	i915_vma_unpin(vma);
	return err;

reloc_err:
	if (!err)
		err = -EIO;
	goto unpin_vma;
}

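/*
 * Run __igt_gpu_reloc() once on each user-visible engine, skipping any
 * engine that requires or uses the command parser, and poisoning the
 * scratch map between runs so stale values cannot pass the check.
 */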
static int igt_gpu_reloc(void *arg)
{
	struct i915_execbuffer eb;
	struct drm_i915_gem_object *scratch;
	int err = 0;
	u32 *map;

	eb.i915 = arg;

	scratch = i915_gem_object_create_internal(eb.i915, 4096);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	map = i915_gem_object_pin_map_unlocked(scratch, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_scratch;
	}

	intel_gt_pm_get(&eb.i915->gt);

	for_each_uabi_engine(eb.engine, eb.i915) {
		if (intel_engine_requires_cmd_parser(eb.engine) ||
		    intel_engine_using_cmd_parser(eb.engine))
			continue;

		reloc_cache_init(&eb.reloc_cache, eb.i915);
		memset(map, POISON_INUSE, 4096);

		intel_engine_pm_get(eb.engine);
		eb.context = intel_context_create(eb.engine);
		if (IS_ERR(eb.context)) {
			err = PTR_ERR(eb.context);
			goto err_pm;
		}
		eb.reloc_pool = NULL;
		eb.reloc_context = NULL;

		i915_gem_ww_ctx_init(&eb.ww, false);
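		/*
		 * Typical ww-mutex retry loop: on -EDEADLK, back off to
		 * drop the held locks and restart the pin/test sequence.
		 */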
retry:
		err = intel_context_pin_ww(eb.context, &eb.ww);
		if (!err) {
			err = __igt_gpu_reloc(&eb, scratch);

			intel_context_unpin(eb.context);
		}
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&eb.ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&eb.ww);

		if (eb.reloc_pool)
			intel_gt_buffer_pool_put(eb.reloc_pool);
		if (eb.reloc_context)
			intel_context_put(eb.reloc_context);

		intel_context_put(eb.context);
err_pm:
		intel_engine_pm_put(eb.engine);
		if (err)
			break;
	}

	if (igt_flush_test(eb.i915))
		err = -EIO;

	intel_gt_pm_put(&eb.i915->gt);
err_scratch:
	i915_gem_object_put(scratch);
	return err;
}

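/*
 * Live selftest entry point, usually invoked by loading i915 with the
 * i915.live_selftests module parameter set on a CONFIG_DRM_I915_SELFTEST
 * kernel. Skip entirely if the GT is already wedged, as no requests can
 * be submitted.
 */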
int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gpu_reloc),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}