/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

/* Poison pattern used to redzone the context image for debug checking */
#define CONTEXT_REDZONE POISON_INUSE

/*
 * Declares struct ewma_runtime plus ewma_runtime_init/_add/_read helpers
 * (see linux/average.h): 3 fractional precision bits, weight factor 2^8.
 * Used below for the context's average GPU runtime tracking.
 */
DECLARE_EWMA(runtime, 3, 8);

/* Forward declarations so this header only needs pointer-sized knowledge */
struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

/*
 * Backend virtual function table for an intel_context. Each submission
 * backend supplies its own implementation; destroy() is shaped as a kref
 * release callback (it takes the kref embedded in the context).
 */
struct intel_context_ops {
	/* Allocate backend state for the context (see CONTEXT_ALLOC_BIT) */
	int (*alloc)(struct intel_context *ce);

	/*
	 * Two-stage pinning: pre_pin() runs with a ww acquire context and
	 * returns the state mapping through *vaddr; pin() then completes
	 * using that mapping. unpin()/post_unpin() mirror the two stages
	 * in reverse. NOTE(review): exact locking rules for each stage
	 * live in the backend implementations — confirm there.
	 */
	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	/* Transition the context into / out of active use on the engine */
	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	/* Restore context state after a GPU reset */
	void (*reset)(struct intel_context *ce);
	/* kref release callback, invoked when the last reference is dropped */
	void (*destroy)(struct kref *kref);
};

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu; /* reuses ref's storage for deferred free */
	};

	/* The engine this context was created against */
	struct intel_engine_cs *engine;
	/*
	 * Engine this context is currently in flight on. The low 2 bits of
	 * the pointer carry a count (see the ptr_mask/unmask helpers below);
	 * readers use READ_ONCE, so writers publish it atomically.
	 */
	struct intel_engine_cs *inflight;
#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)

	/* Address space this context executes in */
	struct i915_address_space *vm;
	/* Back-pointer to the owning GEM context; dereference under RCU */
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state; /* backing storage for the HW context image */
	struct intel_ring *ring; /* ring used to emit requests for this context */
	struct intel_timeline *timeline;

	/* State bits for @flags; manipulated atomically via test/set_bit */
	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1 /* presumably: ops->alloc() done — confirm */
#define CONTEXT_VALID_BIT		2
#define CONTEXT_CLOSED_BIT		3
#define CONTEXT_USE_SEMAPHORES		4
#define CONTEXT_BANNED			5
#define CONTEXT_FORCE_SINGLE_SUBMISSION	6
#define CONTEXT_NOPREEMPT		7

	/* CPU pointer to the register state inside @state */
	u32 *lrc_reg_state;
	/*
	 * Logical ring context identification: lrca/ccid are the two
	 * halves of the 64b descriptor @desc (lrca is the low dword on
	 * little-endian, per the union layout).
	 */
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg; /* moving average (DECLARE_EWMA above) */
		u64 total; /* accumulated runtime */
		u32 last; /* most recent sample */
		/* selftest-only counters; compile away in production builds */
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	/* Backend vfuncs (see struct intel_context_ops) */
	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */
};

#endif /* __INTEL_CONTEXT_TYPES__ */