/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct mutex tlb_invalidate_lock;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;

		/* Pack multiple timelines' seqnos into the same page */
		spinlock_t hwsp_lock;
		struct list_head hwsp_free_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;
		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;
	} info;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};

#endif /* __INTEL_GT_TYPES__ */
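
/*
 * Usage sketch (illustrative; not part of this header): gt->wakeref backs
 * the "awake" state documented above. Code that touches GT hardware takes
 * a wakeref first so runtime PM cannot power the GT down mid-access, and
 * drops it when done; releasing the last reference re-arms the delayed
 * idling described in the comment on @awake. This sketch assumes the
 * intel_gt_pm_get()/intel_gt_pm_put() helpers from "intel_gt_pm.h";
 * example_touch_gt() is a hypothetical caller.
 */
#include "intel_gt_pm.h"

static void example_touch_gt(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);	/* pin gt->wakeref: the GT stays powered */

	/* ... access GT registers through gt->uncore here ... */

	intel_gt_pm_put(gt);	/* unpin: once idle, the GT may power down */
}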
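
/*
 * Usage sketch (illustrative; not part of this header): the two engine
 * tables index the same intel_engine_cs instances, either by global id
 * (engine[]) or by (class, instance) pair (engine_class[][]). A minimal
 * walk over the populated slots, assuming the for_each_engine() iterator
 * and GEM_BUG_ON() from "i915_drv.h"; example_list_engines() is a
 * hypothetical caller.
 */
#include "i915_drv.h"

static void example_list_engines(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		/* both tables must resolve to the same engine */
		GEM_BUG_ON(gt->engine_class[engine->class]
					   [engine->instance] != engine);
	}
}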
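
/*
 * Usage sketch (illustrative; not part of this header): the
 * intel_gt_scratch_field values are byte offsets into the single
 * gt->scratch VMA, spaced apart so the slots never overlap. Assuming the
 * intel_gt_scratch_offset() helper from "intel_gt.h" (which adds the field
 * offset to i915_ggtt_offset(gt->scratch)), a post-sync render flush would
 * target its slot as below; example_render_flush_addr() is a hypothetical
 * wrapper.
 */
#include "intel_gt.h"

static u32 example_render_flush_addr(const struct intel_gt *gt)
{
	/* GGTT address of the 8-byte RENDER_FLUSH scratch slot */
	return intel_gt_scratch_offset(gt,
				       INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
}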