/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers.  If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,

	NUM_STEERING_TYPES
};
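
/*
 * Illustrative sketch, not part of this header: a per-platform steering
 * table is expected to be an array of intel_mmio_range entries terminated
 * by a zeroed sentinel, and a register offset needs steering if it falls
 * inside any range. The table name, the range values and the helper below
 * are made up purely for illustration.
 */
static const struct intel_mmio_range example_mslice_steering_table[] = {
	{ 0x004000, 0x004aff },	/* hypothetical multicast range */
	{ 0x00c800, 0x00cfff },	/* hypothetical multicast range */
	{},			/* sentinel */
};

static inline bool
example_range_table_contains(const struct intel_mmio_range *table, u32 offset)
{
	if (!table)
		return false;

	/* walk the table until the zeroed sentinel entry */
	for (; table->end; table++) {
		if (offset >= table->start && offset <= table->end)
			return true;
	}

	return false;
}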

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct mutex tlb_invalidate_lock;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;
	} info;
};
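
/*
 * Illustrative sketch, not part of this header: following the @total and
 * @start documentation in the stats block above, a busyness sample is the
 * accumulated total plus the in-progress block when the GT is currently
 * active. The helper name is hypothetical, and unlike the real driver this
 * sketch does not take the @lock seqcount that readers would use to get a
 * consistent snapshot of the fields.
 */
static inline ktime_t example_gt_busy_time(const struct intel_gt *gt,
					   ktime_t now)
{
	ktime_t busy = gt->stats.total;

	/* add the open-ended block since the last idle->active transition */
	if (gt->stats.active)
		busy = ktime_add(busy, ktime_sub(now, gt->stats.start));

	return busy;
}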

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
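
/*
 * Illustrative sketch, not part of this header: each scratch field value is
 * a byte offset into the GT's single scratch VMA, so the GGTT address that a
 * command streamer should target is the VMA's GGTT offset plus the field.
 * The driver exposes a helper along these lines (intel_gt_scratch_offset()
 * in intel_gt.h); the name below is made up to avoid clashing with it.
 */
static inline u32 example_gt_scratch_offset(const struct intel_gt *gt,
					    enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}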

#endif /* __INTEL_GT_TYPES__ */