/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
#include "intel_guc_ct.h"
#include "i915_vma.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 *
 * We also keep a statistic on failures; ideally it should stay at zero!
 *   no_wq_space: times that the submission pre-check found no space was
 *                available in the work queue (note, the queue is shared,
 *                not per-engine). It is OK for this to be nonzero, but
 *                it should not be huge!
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;	/* bitmap of (host) engine ids */
	uint32_t priority;
	u32 stage_id;
	uint32_t proc_desc_offset;

	u16 doorbell_id;
	unsigned long doorbell_offset;
	u32 doorbell_cookie;

	spinlock_t wq_lock;
	uint32_t wq_offset;
	uint32_t wq_size;
	uint32_t wq_tail;
	uint32_t wq_rsvd;
	uint32_t no_wq_space;

	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};
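
/*
 * Illustrative sketch only, not the driver's actual submission path: a
 * minimal example of how the doorbell described above might be rung using
 * the fields of struct i915_guc_client. It assumes the doorbell cacheline
 * lives in the permanently kmap'd first page at client->doorbell_offset
 * and that writing a fresh cookie value there is what raises the interrupt
 * to the GuC; the helper name is hypothetical.
 */
static inline void example_i915_guc_ring_doorbell(struct i915_guc_client *client)
{
	/* Doorbell cacheline sits inside the kmap'd first page */
	u32 *db = client->vaddr + client->doorbell_offset;
	u32 cookie = client->doorbell_cookie + 1;

	/* Assumed: zero is treated as a reserved cookie value */
	if (cookie == 0)
		cookie = 1;

	/* The write itself "rings the doorbell", i.e. interrupts the GuC */
	WRITE_ONCE(*db, cookie);
	client->doorbell_cookie = cookie;
}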

enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,
	INTEL_UC_FIRMWARE_NONE = 0,
	INTEL_UC_FIRMWARE_PENDING,
	INTEL_UC_FIRMWARE_SUCCESS
};

/* User-friendly representation of an enum */
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
	switch (status) {
	case INTEL_UC_FIRMWARE_FAIL:
		return "FAIL";
	case INTEL_UC_FIRMWARE_NONE:
		return "NONE";
	case INTEL_UC_FIRMWARE_PENDING:
		return "PENDING";
	case INTEL_UC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	}
	return "<invalid>";
}

enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/* User-friendly representation of an enum */
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return "GuC";
	case INTEL_UC_FW_TYPE_HUC:
		return "HuC";
	}
	return "uC";
}

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;

	enum intel_uc_fw_type type;
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};
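
/*
 * Illustrative sketch only: one plausible way to report the fetch/load
 * state and firmware versions tracked in struct intel_uc_fw, using the
 * *_repr() helpers above. The helper name is hypothetical and it assumes
 * DRM_DEBUG_DRIVER is visible at the point of use.
 */
static inline void example_intel_uc_fw_debug(const struct intel_uc_fw *uc_fw)
{
	DRM_DEBUG_DRIVER("%s fw: path %s, fetch %s, load %s, found %u.%u (wanted %u.%u)\n",
			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
			 intel_uc_fw_status_repr(uc_fw->fetch_status),
			 intel_uc_fw_status_repr(uc_fw->load_status),
			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
}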

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	/* The runtime stuff gets created only when GuC logging gets enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* Log snapshot if GuC errors during load */
	struct drm_i915_gem_object *load_err_log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	uint32_t db_cacheline;	/* Cyclic counter mod pagesize */

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};

struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);

static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}

static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}
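
/*
 * Illustrative sketch only: callers of intel_guc_send() build an array of
 * u32 action words (the first being an action opcode from intel_guc_fwif.h)
 * and the request is dispatched through whichever backend guc->send was set
 * to at init time (e.g. MMIO based or CT based). The helper name, opcode
 * and payload below are placeholders, not a real GuC action.
 */
static inline int example_intel_guc_send_action(struct intel_guc *guc)
{
	u32 action[2];

	action[0] = 0;	/* hypothetical action opcode */
	action[1] = 0;	/* hypothetical payload dword */

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}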

/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
	return offset;
}
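
/*
 * Illustrative sketch only: objects shared with the GuC are typically
 * allocated via intel_guc_allocate_vma() (declared above), which pins them
 * into the GGTT, and the address handed to the firmware is then taken with
 * guc_ggtt_offset() so the WOPCM/GGTT range checks apply. The helper name
 * is hypothetical and error handling is abbreviated; a real caller would
 * propagate PTR_ERR(vma).
 */
static inline u32 example_guc_object_ggtt_offset(struct intel_guc *guc, u32 size)
{
	struct i915_vma *vma;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return 0;

	return guc_ggtt_offset(vma);
}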

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
void intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);

#endif