/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

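/*
 * Allocate a request on @engine using @ctx's per-engine context, looked
 * up via the legacy engine map. The local context reference is dropped
 * before returning; the request itself keeps the context alive.
 */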
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}

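/*
 * Build and pin a batch buffer that stores @val at @offset into each of
 * @count consecutive pages of @vma. The returned batch is bound into the
 * same address space as @vma, ready to be passed to emit_bb_start().
 */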
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

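	/* Worst case is 4 dwords per store (gen8+), plus the batch terminator. */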
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

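	/* The final store must still land inside the vma. */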
	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

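	/*
	 * The MI_STORE_DWORD_IMM layout depends on the generation: gen8+
	 * takes a 64-bit address, gen4-7 a 32-bit address (via the global
	 * GTT before gen6), and gen2/3 use the original encoding with
	 * MI_MEM_VIRTUAL.
	 */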
	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;

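	/* Make sure the CPU writes to the batch reach memory before the GPU reads it. */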
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

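	/* Bind the batch into the same address space as the target vma. */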
	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

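/*
 * Fill @count pages of @vma, starting at @offset, with @val using a
 * single request on @ce. @vma must already be pinned; the batch built
 * by igt_emit_store_dw() is unpinned and released before returning.
 */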
int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

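	/*
	 * Declare the batch as read-only and the target as written so the
	 * request is correctly ordered against other users of each object.
	 */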
	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

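	/* MI_STORE_DWORD needs a privileged (secure) batch on gen5 and earlier. */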
	flags = 0;
	if (INTEL_GEN(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);

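	/*
	 * On failure the request is still added, but with the error recorded
	 * on its fence so that waiters observe the failure.
	 */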
skip_request:
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}