/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
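
/*
 * A minimal userspace sketch (illustrative only, not compiled here) of
 * opening a stream as described above. The metrics set ID, OA format and
 * exponent are example values; drm_fd is assumed to be an already open DRM
 * file descriptor:
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = ARRAY_SIZE(properties) / 2,
 *		.properties_ptr = (u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() for the sample records documented in
 * the rest of this file.
 */
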
49
50 /**
51 * DOC: i915 Perf History and Comparison with Core Perf
52 *
53 * The interface was initially inspired by the core Perf infrastructure but
54 * some notable differences are:
55 *
56 * i915 perf file descriptors represent a "stream" instead of an "event"; where
57 * a perf event primarily corresponds to a single 64bit value, while a stream
58 * might sample sets of tightly-coupled counters, depending on the
59 * configuration. For example the Gen OA unit isn't designed to support
60 * orthogonal configurations of individual counters; it's configured for a set
61 * of related counters. Samples for an i915 perf stream capturing OA metrics
62 * will include a set of counter values packed in a compact HW specific format.
63 * The OA unit supports a number of different packing formats which can be
64 * selected by the user opening the stream. Perf has support for grouping
65 * events, but each event in the group is configured, validated and
66 * authenticated individually with separate system calls.
67 *
68 * i915 perf stream configurations are provided as an array of u64 (key,value)
69 * pairs, instead of a fixed struct with multiple miscellaneous config members,
70 * interleaved with event-type specific members.
71 *
72 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
73 * The supported metrics are being written to memory by the GPU unsynchronized
74 * with the CPU, using HW specific packing formats for counter sets. Sometimes
75 * the constraints on HW configuration require reports to be filtered before it
76 * would be acceptable to expose them to unprivileged applications - to hide
77 * the metrics of other processes/contexts. For these use cases a read() based
78 * interface is a good fit, and provides an opportunity to filter data as it
79 * gets copied from the GPU mapped buffers to userspace buffers.
80 *
81 *
82 * Issues hit with first prototype based on Core Perf
83 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84 *
85 * The first prototype of this driver was based on the core perf
86 * infrastructure, and while we did make that mostly work, with some changes to
87 * perf, we found we were breaking or working around too many assumptions baked
88 * into perf's currently cpu centric design.
89 *
90 * In the end we didn't see a clear benefit to making perf's implementation and
91 * interface more complex by changing design assumptions while we knew we still
92 * wouldn't be able to use any existing perf based userspace tools.
93 *
94 * Also considering the Gen specific nature of the Observability hardware and
95 * how userspace will sometimes need to combine i915 perf OA metrics with
96 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
97 * expecting the interface to be used by a platform specific userspace such as
98 * OpenGL or tools. This is to say; we aren't inherently missing out on having
99 * a standard vendor/architecture agnostic interface by not using perf.
100 *
101 *
102 * For posterity, in case we might re-visit trying to adapt core perf to be
103 * better suited to exposing i915 metrics these were the main pain points we
104 * hit:
105 *
106 * - The perf based OA PMU driver broke some significant design assumptions:
107 *
108 * Existing perf pmus are used for profiling work on a cpu and we were
109 * introducing the idea of _IS_DEVICE pmus with different security
110 * implications, the need to fake cpu-related data (such as user/kernel
111 * registers) to fit with perf's current design, and adding _DEVICE records
112 * as a way to forward device-specific status records.
113 *
114 * The OA unit writes reports of counters into a circular buffer, without
115 * involvement from the CPU, making our PMU driver the first of a kind.
116 *
117 * Given the way we were periodically forward data from the GPU-mapped, OA
118 * buffer to perf's buffer, those bursts of sample writes looked to perf like
119 * we were sampling too fast and so we had to subvert its throttling checks.
120 *
121 * Perf supports groups of counters and allows those to be read via
122 * transactions internally but transactions currently seem designed to be
123 * explicitly initiated from the cpu (say in response to a userspace read())
124 * and while we could pull a report out of the OA buffer we can't
125 * trigger a report from the cpu on demand.
126 *
127 * Related to being report based; the OA counters are configured in HW as a
128 * set while perf generally expects counter configurations to be orthogonal.
129 * Although counters can be associated with a group leader as they are
130 * opened, there's no clear precedent for being able to provide group-wide
131 * configuration attributes (for example we want to let userspace choose the
132 * OA unit report format used to capture all counters in a set, or specify a
133 * GPU context to filter metrics on). We avoided using perf's grouping
134 * feature and forwarded OA reports to userspace via perf's 'raw' sample
135 * field. This suited our userspace well considering how coupled the counters
136 * are when dealing with normalizing. It would be inconvenient to split
137 * counters up into separate events, only to require userspace to recombine
138 * them. For Mesa it's also convenient to be forwarded raw, periodic reports
139 * for combining with the side-band raw reports it captures using
140 * MI_REPORT_PERF_COUNT commands.
141 *
142 * - As a side note on perf's grouping feature; there was also some concern
143 * that using PERF_FORMAT_GROUP as a way to pack together counter values
144 * would quite drastically inflate our sample sizes, which would likely
145 * lower the effective sampling resolutions we could use when the available
146 * memory bandwidth is limited.
147 *
148 * With the OA unit's report formats, counters are packed together as 32
149 * or 40bit values, with the largest report size being 256 bytes.
150 *
151 * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
152 * documented ordering to the values, implying PERF_FORMAT_ID must also be
153 * used to add a 64bit ID before each value; giving 16 bytes per counter.
154 *
155 * Related to counter orthogonality; we can't time share the OA unit, while
156 * event scheduling is a central design idea within perf for allowing
157 * userspace to open + enable more events than can be configured in HW at any
158 * one time. The OA unit is not designed to allow re-configuration while in
159 * use. We can't reconfigure the OA unit without losing internal OA unit
160 * state which we can't access explicitly to save and restore. Reconfiguring
161 * the OA unit is also relatively slow, involving ~100 register writes. From
162 * userspace Mesa also depends on a stable OA configuration when emitting
163 * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
164 * disabled while there are outstanding MI_RPC commands lest we hang the
165 * command streamer.
166 *
167 * The contents of sample records aren't extensible by device drivers (i.e.
168 * the sample_type bits). As an example; Sourab Gupta had been looking to
169 * attach GPU timestamps to our OA samples. We were shoehorning OA reports
170 * into sample records by using the 'raw' field, but it's tricky to pack more
171 * than one thing into this field because events/core.c currently only lets a
172 * pmu give a single raw data pointer plus len which will be copied into the
173 * ring buffer. To include more than the OA report we'd have to copy the
174 * report into an intermediate larger buffer. I'd been considering allowing a
175 * vector of data+len values to be specified for copying the raw data, but
176 * it felt like a kludge to being using the raw field for this purpose.
177 *
178 * - It felt like our perf based PMU was making some technical compromises
179 * just for the sake of using perf:
180 *
181 * perf_event_open() requires events to either relate to a pid or a specific
182 * cpu core, while our device pmu related to neither. Events opened with a
183 * pid will be automatically enabled/disabled according to the scheduling of
184 * that process - so not appropriate for us. When an event is related to a
185 * cpu id, perf ensures pmu methods will be invoked via an inter process
186 * interrupt on that core. To avoid invasive changes our userspace opened OA
187 * perf events for a specific cpu. This was workable but it meant the
188 * majority of the OA driver ran in atomic context, including all OA report
189 * forwarding, which wasn't really necessary in our case and seems to make
190 * our locking requirements somewhat complex as we handled the interaction
191 * with the rest of the i915 driver.
192 */
193
194 #include <linux/anon_inodes.h>
195 #include <linux/sizes.h>
196 #include <linux/uuid.h>
197
198 #include "gem/i915_gem_context.h"
199 #include "gt/intel_engine_pm.h"
200 #include "gt/intel_engine_user.h"
201 #include "gt/intel_execlists_submission.h"
202 #include "gt/intel_gpu_commands.h"
203 #include "gt/intel_gt.h"
204 #include "gt/intel_gt_clock_utils.h"
205 #include "gt/intel_lrc.h"
206 #include "gt/intel_ring.h"
207
208 #include "i915_drv.h"
209 #include "i915_perf.h"
210
211 /* HW requires this to be a power of two, between 128k and 16M, though driver
212 * is currently generally designed assuming the largest 16M size is used such
213 * that the overflow cases are unlikely in normal operation.
214 */
215 #define OA_BUFFER_SIZE SZ_16M
216
217 #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
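
/*
 * For example, with the 16M buffer above, OA_TAKEN(0x80, 0xffff80) =
 * (0x80 - 0xffff80) & 0xffffff = 0x100: masking with (OA_BUFFER_SIZE - 1)
 * makes the unsigned subtraction wrap correctly when the tail pointer has
 * wrapped past the end of the buffer while the head has not yet caught up.
 */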

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not at 0, meaning its previous
 * report is completely in memory and ready to be read. Those dwords are also
 * set to 0 once read and the whole buffer is cleared upon OA buffer
 * initialization. The first dword is the reason for this report while the
 * second is the timestamp, making it fairly unlikely for both fields to be 0.
 * A more detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
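
/*
 * A sketch of the arithmetic behind these limits, assuming the ~12.5MHz
 * (80ns) Haswell timestamp mentioned below: a report is triggered each time
 * the selected timestamp bit toggles, so
 *
 *	period_ns = 80 * 2^(exponent + 1)
 *
 * giving 160ns for exponent 0 (the fastest rate noted below), roughly 343
 * seconds for OA_EXPONENT_MAX (31), and one sample per 2^64 ticks for the
 * maximum HW exponent of 63 (the "once per 47 thousand years" above).
 */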

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats: the current code
 * assumes all reports have a power-of-two size and that ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]		    = { 0, 64 },
	[I915_OA_FORMAT_A29]		    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]	    = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]		    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]	    = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]	    = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for
 *                  OA data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
 * whether there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, during which the global OA
 * configuration can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
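
/*
 * An illustrative userspace sketch (not compiled here) of consuming the
 * records appended above: read() fills the buffer with a sequence of
 * drm_i915_perf_record_header structs, each followed by its type-specific
 * payload (a raw OA report for DRM_I915_PERF_RECORD_SAMPLE).
 * process_oa_report() is a hypothetical helper:
 *
 *	u8 buf[16 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	ssize_t offset = 0;
 *
 *	while (offset < len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(header + 1);
 *
 *		offset += header->size;
 *	}
 */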

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context, it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
			stream->specific_ctx_id_mask =
				((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
			stream->specific_ctx_id =
				(XEHP_MAX_CONTEXT_HW_ID - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
		} else {
			stream->specific_ctx_id_mask =
				((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
			/*
			 * Pick an unused context id
			 * 0 - BITS_PER_LONG are used by other contexts
			 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
			 */
			stream->specific_ctx_id =
				(GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		}
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}
1400
gen7_init_oa_buffer(struct i915_perf_stream * stream)1401 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1402 {
1403 struct intel_uncore *uncore = stream->uncore;
1404 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1405 unsigned long flags;
1406
1407 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1408
1409 /* Pre-DevBDW: OABUFFER must be set with counters off,
1410 * before OASTATUS1, but after OASTATUS2
1411 */
1412 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1413 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1414 stream->oa_buffer.head = gtt_offset;
1415
1416 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1417
1418 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1419 gtt_offset | OABUFFER_SIZE_16M);
1420
1421 /* Mark that we need updated tail pointers to read from... */
1422 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1423 stream->oa_buffer.tail = gtt_offset;
1424
1425 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1426
1427 /* On Haswell we have to track which OASTATUS1 flags we've
1428 * already seen since they can't be cleared while periodic
1429 * sampling is enabled.
1430 */
1431 stream->perf->gen7_latched_oastatus1 = 0;
1432
1433 /* NB: although the OA buffer will initially be allocated
1434 * zeroed via shmfs (and so this memset is redundant when
1435 * first allocating), we may re-init the OA buffer, either
1436 * when re-enabling a stream or in error/reset paths.
1437 *
1438 * The reason we clear the buffer for each re-init is for the
1439 * sanity check in gen7_append_oa_reports() that looks at the
1440 * report-id field to make sure it's non-zero which relies on
1441 * the assumption that new reports are being written to zeroed
1442 * memory...
1443 */
1444 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1445 }

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);
}

static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
		return -ENODEV;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	stream->oa_buffer.vma = vma;

	stream->oa_buffer.vaddr =
		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
	if (IS_ERR(stream->oa_buffer.vaddr)) {
		ret = PTR_ERR(stream->oa_buffer.vaddr);
		goto err_unpin;
	}

	return 0;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	stream->oa_buffer.vaddr = NULL;
	stream->oa_buffer.vma = NULL;

	return ret;
}

static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	cmd |= MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
						offset) + 4 * d;
		*cs++ = 0;
	}

	return cs;
}
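
/*
 * Illustrative annotation (added here, not in the original source): on
 * gen8+ the cmd++ above grows each packet by one dword so it can carry a
 * 64-bit address, so saving a 64-bit GPR with dword_count == 2 emits two
 * packets along the lines of:
 *
 *	MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT (+1 length)
 *	<reg offset + 0>
 *	<scratch address + 0>
 *	0	(upper 32 bits of the address; an MI_NOOP pre-gen8)
 *	MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT (+1 length)
 *	<reg offset + 4>
 *	<scratch address + 4>
 *	0
 */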

static int alloc_noa_wait(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	const u64 delay_ticks = 0xffffffffffffffff -
		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
		atomic64_read(&stream->perf->noa_programming_delay));
	const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
	u32 *batch, *ts0, *cs, *jump;
	struct i915_gem_ww_ctx ww;
	int ret, i;
	enum {
		START_TS,
		NOW_TS,
		DELTA_TS,
		JUMP_PREDICATE,
		DELTA_TARGET,
		N_CS_GPR
	};

	bo = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm,
			"Failed to allocate NOA wait batchbuffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(bo, &ww);
	if (ret)
		goto out_ww;

	/*
	 * We pin in GGTT because multiple OA config BOs will jump into this
	 * buffer, so its address needs to stay fixed for the lifetime of the
	 * i915/perf stream.
	 */
	vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_ww;
	}

	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(batch)) {
		ret = PTR_ERR(batch);
		goto err_unpin;
	}

	/* Save registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, true /* save */, CS_GPR(i),
			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);

	/* First timestamp snapshot location. */
	ts0 = cs;

	/*
	 * Initial snapshot of the timestamp register to implement the wait.
	 * We work with 32 bit values, so clear out the top 32 bits of the
	 * register because the ALU operates on 64 bits.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));

	/*
	 * This is the location we're going to jump back into until the
	 * required amount of time has passed.
	 */
	jump = cs;

	/*
	 * Take another snapshot of the timestamp register. Take care to clear
	 * the top 32 bits of CS_GPR(1) as we're using it for other
	 * operations below.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));

	/*
	 * Compute the difference between the two timestamps and store the
	 * result in CS_GPR(DELTA_TS).
	 */
	*cs++ = MI_MATH(5);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
	*cs++ = MI_MATH_SUB;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	/*
	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
	 * timestamp has rolled over its 32 bits) into the predicate register
	 * to be used for the predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);

	/* Restart from the beginning if we had timestamps roll over. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
	*cs++ = 0;

	/*
	 * Now take the difference between the two previous timestamps and
	 * add it to:
	 *
	 *	(2^64 - 1) - delay (converted to timestamp ticks)
	 *
	 * When the Carry Flag contains 1 this means the elapsed time is
	 * longer than the expected delay, and we can exit the wait loop.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
	*cs++ = lower_32_bits(delay_ticks);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
	*cs++ = upper_32_bits(delay_ticks);

	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
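
	/*
	 * Worked example (annotation added for clarity, not from the
	 * original source, assuming a delay of 100 ticks): DELTA_TARGET
	 * holds 2^64 - 1 - 100, so the 64-bit add above first carries out
	 * once DELTA_TS exceeds 100 ticks. STOREINV writes the inverted
	 * carry flag, so JUMP_PREDICATE stays set (keep looping) only
	 * while the elapsed time is still below the programmed delay.
	 */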
1769
1770 *cs++ = MI_ARB_CHECK;
1771
1772 /*
1773 * Transfer the result into the predicate register to be used for the
1774 * predicated jump.
1775 */
1776 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1777 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1778 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1779
1780 /* Predicate the jump. */
1781 *cs++ = (GRAPHICS_VER(i915) < 8 ?
1782 MI_BATCH_BUFFER_START :
1783 MI_BATCH_BUFFER_START_GEN8) |
1784 MI_BATCH_PREDICATE;
1785 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1786 *cs++ = 0;
1787
1788 /* Restore registers. */
1789 for (i = 0; i < N_CS_GPR; i++)
1790 cs = save_restore_register(
1791 stream, cs, false /* restore */, CS_GPR(i),
1792 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1793 cs = save_restore_register(
1794 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1795 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1796
1797 /* And return to the ring. */
1798 *cs++ = MI_BATCH_BUFFER_END;
1799
1800 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1801
1802 i915_gem_object_flush_map(bo);
1803 __i915_gem_object_release_map(bo);
1804
1805 stream->noa_wait = vma;
1806 goto out_ww;
1807
1808 err_unpin:
1809 i915_vma_unpin_and_release(&vma, 0);
1810 out_ww:
1811 if (ret == -EDEADLK) {
1812 ret = i915_gem_ww_ctx_backoff(&ww);
1813 if (!ret)
1814 goto retry;
1815 }
1816 i915_gem_ww_ctx_fini(&ww);
1817 if (ret)
1818 i915_gem_object_put(bo);
1819 return ret;
1820 }
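
/*
 * Condensed view of the NOA wait batch built above (explanatory sketch
 * added here; not part of the original file):
 *
 *	save CS_GPRs and MI_PREDICATE_RESULT_1 to scratch
 * ts0:	START_TS = RING_TIMESTAMP	(lower 32 bits)
 * jump:	NOW_TS = RING_TIMESTAMP
 *	DELTA_TS = NOW_TS - START_TS
 *	if (carry)				goto ts0  (timestamp wrapped)
 *	if (!carry(DELTA_TS + DELTA_TARGET))	goto jump (delay not elapsed)
 *	restore registers from scratch
 *	MI_BATCH_BUFFER_END
 */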

static u32 *write_cs_mi_lri(u32 *cs,
			    const struct i915_oa_reg *reg_data,
			    u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
			u32 n_lri = min_t(u32,
					  n_regs - i,
					  MI_LOAD_REGISTER_IMM_MAX_REGS);

			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
		}
		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
		*cs++ = reg_data[i].value;
	}

	return cs;
}

static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
		count += num_regs * 2;
	}

	return count;
}
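
/*
 * Worked example for the two helpers above (annotation only, assuming
 * MI_LOAD_REGISTER_IMM_MAX_REGS is 126): programming 200 registers emits
 * DIV_ROUND_UP(200, 126) == 2 MI_LOAD_REGISTER_IMM headers plus 200
 * (offset, value) pairs, i.e. num_lri_dwords(200) == 2 + 400 == 402
 * dwords:
 *
 *	MI_LOAD_REGISTER_IMM(126)
 *	<offset 0> <value 0> ... <offset 125> <value 125>
 *	MI_LOAD_REGISTER_IMM(74)
 *	<offset 126> <value 126> ... <offset 199> <value 199>
 */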

static struct i915_oa_config_bo *
alloc_oa_config_buffer(struct i915_perf_stream *stream,
		       struct i915_oa_config *oa_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_oa_config_bo *oa_bo;
	struct i915_gem_ww_ctx ww;
	size_t config_length = 0;
	u32 *cs;
	int err;

	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
	if (!oa_bo)
		return ERR_PTR(-ENOMEM);

	config_length += num_lri_dwords(oa_config->mux_regs_len);
	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
	config_length += num_lri_dwords(oa_config->flex_regs_len);
	config_length += 3; /* MI_BATCH_BUFFER_START */
	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_free;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out_ww;

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_ww;
	}

	cs = write_cs_mi_lri(cs,
			     oa_config->mux_regs,
			     oa_config->mux_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->b_counter_regs,
			     oa_config->b_counter_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->flex_regs,
			     oa_config->flex_regs_len);

	/* Jump into the active wait. */
	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8);
	*cs++ = i915_ggtt_offset(stream->noa_wait);
	*cs++ = 0;

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	oa_bo->vma = i915_vma_instance(obj,
				       &stream->engine->gt->ggtt->vm,
				       NULL);
	if (IS_ERR(oa_bo->vma)) {
		err = PTR_ERR(oa_bo->vma);
		goto out_ww;
	}

	oa_bo->oa_config = i915_oa_config_get(oa_config);
	llist_add(&oa_bo->node, &stream->oa_config_bos);

out_ww:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		i915_gem_object_put(obj);
err_free:
	if (err) {
		kfree(oa_bo);
		return ERR_PTR(err);
	}
	return oa_bo;
}

static struct i915_vma *
get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
{
	struct i915_oa_config_bo *oa_bo;

	/*
	 * Look for the buffer in the already allocated BOs attached
	 * to the stream.
	 */
	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
		if (oa_bo->oa_config == oa_config &&
		    memcmp(oa_bo->oa_config->uuid,
			   oa_config->uuid,
			   sizeof(oa_config->uuid)) == 0)
			goto out;
	}

	oa_bo = alloc_oa_config_buffer(stream, oa_config);
	if (IS_ERR(oa_bo))
		return ERR_CAST(oa_bo);

out:
	return i915_vma_get(oa_bo->vma);
}

static int
emit_oa_config(struct i915_perf_stream *stream,
	       struct i915_oa_config *oa_config,
	       struct intel_context *ce,
	       struct i915_active *active)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int err;

	vma = get_oa_vma(stream, oa_config);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto err;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma_unpin;
	}

	if (!IS_ERR_OR_NULL(active)) {
		/* After all individual context modifications */
		err = i915_request_await_active(rq, active,
						I915_ACTIVE_AWAIT_ACTIVE);
		if (err)
			goto err_add_request;

		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = i915_request_await_object(rq, vma->obj, 0);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto err_add_request;

	err = rq->engine->emit_bb_start(rq,
					vma->node.start, 0,
					I915_DISPATCH_SECURE);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
err_vma_unpin:
	i915_vma_unpin(vma);
err:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}

	i915_gem_ww_ctx_fini(&ww);
	i915_vma_put(vma);
	return err;
}

static struct intel_context *oa_context(struct i915_perf_stream *stream)
{
	return stream->pinned_ctx ?: stream->engine->kernel_context;
}

static int
hsw_enable_metric_set(struct i915_perf_stream *stream,
		      struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;

	/*
	 * PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 0, GEN7_DOP_CLOCK_GATE_ENABLE);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
			      i915_reg_t reg)
{
	u32 mmio = i915_mmio_reg_offset(reg);
	int i;

	/*
	 * This arbitrary default will select the 'EU FPU0 Pipeline
	 * Active' event. In the future it's anticipated that there
	 * will be an explicit 'No Event' we can select, but not yet...
	 */
	if (!oa_config)
		return 0;

	for (i = 0; i < oa_config->flex_regs_len; i++) {
		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
			return oa_config->flex_regs[i].value;
	}

	return 0;
}
/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void
gen8_update_reg_state_unlocked(const struct intel_context *ce,
			       const struct i915_perf_stream *stream)
{
	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	i915_reg_t flex_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	u32 *reg_state = ce->lrc_reg_state;
	int i;

	reg_state[ctx_oactxctrl + 1] =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
		reg_state[ctx_flexeu0 + i * 2 + 1] =
			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
}

struct flex {
	i915_reg_t reg;
	u32 offset;
	u32 value;
};

static int
gen8_store_flex(struct i915_request *rq,
		struct intel_context *ce,
		const struct flex *flex, unsigned int count)
{
	u32 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
	do {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = offset + flex->offset * sizeof(u32);
		*cs++ = 0;
		*cs++ = flex->value;
	} while (flex++, --count);

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen8_load_flex(struct i915_request *rq,
	       struct intel_context *ce,
	       const struct flex *flex, unsigned int count)
{
	u32 *cs;

	GEM_BUG_ON(!count || count > 63);

	cs = intel_ring_begin(rq, 2 * count + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*cs++ = i915_mmio_reg_offset(flex->reg);
		*cs++ = flex->value;
	} while (flex++, --count);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen8_modify_context(struct intel_context *ce,
			       const struct flex *flex, unsigned int count)
{
	struct i915_request *rq;
	int err;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Serialise with the remote context */
	err = intel_context_prepare_remote_request(ce, rq);
	if (err == 0)
		err = gen8_store_flex(rq, ce, flex, count);

	i915_request_add(rq);
	return err;
}

static int
gen8_modify_self(struct intel_context *ce,
		 const struct flex *flex, unsigned int count,
		 struct i915_active *active)
{
	struct i915_request *rq;
	int err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (!IS_ERR_OR_NULL(active)) {
		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = gen8_load_flex(rq, ce, flex, count);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
	return err;
}

static int gen8_configure_context(struct i915_gem_context *ctx,
				  struct flex *flex, unsigned int count)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		GEM_BUG_ON(ce == ce->engine->kernel_context);

		if (ce->engine->class != RENDER_CLASS)
			continue;

		/* Otherwise OA settings will be set upon first use */
		if (!intel_context_pin_if_active(ce))
			continue;

		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
		err = gen8_modify_context(ce, flex, count);

		intel_context_unpin(ce);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

static int gen12_configure_oar_context(struct i915_perf_stream *stream,
				       struct i915_active *active)
{
	int err;
	struct intel_context *ce = stream->pinned_ctx;
	u32 format = stream->oa_buffer.format;
	struct flex regs_context[] = {
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
			active ? GEN8_OA_COUNTER_RESUME : 0,
		},
	};
	/* Offsets in regs_lri are not used since this configuration is only
	 * applied using LRI. Initialize the correct offsets for posterity.
	 */
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
	struct flex regs_lri[] = {
		{
			GEN12_OAR_OACONTROL,
			GEN12_OAR_OACONTROL_OFFSET + 1,
			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
		},
		{
			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
			CTX_CONTEXT_CONTROL,
			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
				      active ?
				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
				      0)
		},
	};

	/* Modify the context image of pinned context with regs_context */
	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
	intel_context_unlock_pinned(ce);
	if (err)
		return err;

	/* Apply regs_lri using LRI with pinned context */
	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 * Note: the first flex register passed must always be R_PWR_CLK_STATE
 */
static int
oa_configure_all_contexts(struct i915_perf_stream *stream,
			  struct flex *regs,
			  size_t num_regs,
			  struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx, *cn;
	int err;

	lockdep_assert_held(&stream->perf->lock);

	/*
	 * The OA register config is setup through the context image. This image
	 * might be written to by the GPU on context switch (in particular on
	 * lite-restore). This means we can't safely update a context's image
	 * if this context is scheduled/submitted to run on the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * Note that since we emit all requests from a single ring, there
	 * is still an implicit global barrier here that may cause a high
	 * priority context to wait for an otherwise independent low priority
	 * context. Contexts idle at the time of reconfiguration are not
	 * trapped behind the barrier.
	 */
	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		err = gen8_configure_context(ctx, regs, num_regs);
		if (err) {
			i915_gem_context_put(ctx);
			return err;
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	/*
	 * After updating all other contexts, we need to modify ourselves.
	 * If we don't modify the kernel_context, we do not get events while
	 * idle.
	 */
	for_each_uabi_engine(engine, i915) {
		struct intel_context *ce = engine->kernel_context;

		if (engine->class != RENDER_CLASS)
			continue;

		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		err = gen8_modify_self(ce, regs, num_regs, active);
		if (err)
			return err;
	}

	return 0;
}

static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
			     const struct i915_oa_config *oa_config,
			     struct i915_active *active)
{
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
	};

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
			   const struct i915_oa_config *oa_config,
			   struct i915_active *active)
{
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
		},
		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
	};
#undef ctx_flexeuN
	int i;

	regs[1].value =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 2; i < ARRAY_SIZE(regs); i++)
		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
gen8_enable_metric_set(struct i915_perf_stream *stream,
		       struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed, causing a lot of redundant
	 * work for processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering using that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
		intel_uncore_write(uncore, GEN8_OA_DEBUG,
				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = lrc_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}

static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
			struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	int ret;

	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
			   /* Disable clk ratio reports, like previous Gens. */
			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
			   /*
			    * If the user didn't require OA reports, instruct
			    * the hardware not to emit ctx switch reports.
			    */
			   oag_report_ctx_switches(stream));

	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
			    : 0);

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen12_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	/*
	 * For Gen12, performance counters are context
	 * saved/restored. Only enable it for the context that
	 * requested this.
	 */
	if (stream->ctx) {
		ret = gen12_configure_oar_context(stream, active);
		if (ret)
			return ret;
	}

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static void gen11_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	gen12_configure_all_contexts(stream, NULL, NULL);

	/* disable the context save/restore or OAR counters */
	if (stream->ctx)
		gen12_configure_oar_context(stream, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen7_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_gem_context *ctx = stream->ctx;
	u32 ctx_id = stream->specific_ctx_id;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully before trying to avoid this, since it also
	 * ensures status flags and the buffer itself are cleared in error
	 * paths, and we have checks for invalid reports based on the
	 * assumption that certain fields are written to zeroed memory,
	 * which this helps maintain.
	 */
	gen7_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
}

static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully before trying to avoid this, since it also
	 * ensures status flags and the buffer itself are cleared in error
	 * paths, and we have checks for invalid reports based on the
	 * assumption that certain fields are written to zeroed memory,
	 * which this helps maintain.
	 */
	gen8_init_oa_buffer(stream);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports.
	 */
	intel_uncore_write(uncore, GEN8_OACONTROL,
			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
			   GEN8_OA_COUNTER_ENABLE);
}

static void gen12_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * If we don't want OA reports from the OA buffer, then we don't even
	 * need to program the OAG unit.
	 */
	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
		return;

	gen12_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	stream->pollin = false;

	stream->perf->ops.oa_enable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_start(&stream->poll_check_timer,
			      ns_to_ktime(stream->poll_oa_period),
			      HRTIMER_MODE_REL_PINNED);
}
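
/*
 * Hedged userspace sketch (an illustration, not part of this driver): a
 * client typically brackets the region of interest with the enable and
 * disable ioctls on the stream fd returned by DRM_IOCTL_I915_PERF_OPEN:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	... read() / poll() for sample records ...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */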

static void gen7_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen8_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen12_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN12_OAG_OACONTROL,
				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");

	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
	if (intel_wait_for_register(uncore,
				    GEN12_OA_TLB_INV_CR,
				    1, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA tlb invalidate timed out\n");
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	stream->perf->ops.oa_disable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_cancel(&stream->poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};

static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
	struct i915_active *active;
	int err;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);

	i915_active_put(active);
	return err;
}

static void
get_default_sseu_config(struct intel_sseu *out_sseu,
			struct intel_engine_cs *engine)
{
	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;

	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);

	if (GRAPHICS_VER(engine->i915) == 11) {
		/*
		 * We only need the subslice count, so it doesn't matter
		 * which subslices we select - just keep the low bits set
		 * for half of all available subslices per slice.
		 */
		out_sseu->subslice_mask =
			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
		out_sseu->slice_mask = 0x1;
	}
}

static int
get_sseu_config(struct intel_sseu *out_sseu,
		struct intel_engine_cs *engine,
		const struct drm_i915_gem_context_param_sseu *drm_sseu)
{
	if (drm_sseu->engine.engine_class != engine->uabi_class ||
	    drm_sseu->engine.engine_instance != engine->uabi_instance)
		return -EINVAL;

	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}

/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct i915_perf *perf = stream->perf;
	int format_size;
	int ret;

	if (!props->engine) {
		drm_dbg(&stream->perf->i915->drm,
			"OA engine not specified\n");
		return -EINVAL;
	}

	/*
	 * If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!perf->metrics_kobj) {
		drm_dbg(&stream->perf->i915->drm,
			"OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
		drm_dbg(&stream->perf->i915->drm,
			"Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!perf->ops.enable_metric_set) {
		drm_dbg(&stream->perf->i915->drm,
			"OA unit not supported\n");
		return -ENODEV;
	}

	/*
	 * To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (perf->exclusive_stream) {
		drm_dbg(&stream->perf->i915->drm,
			"OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		drm_dbg(&stream->perf->i915->drm,
			"OA report format not specified\n");
		return -EINVAL;
	}

	stream->engine = props->engine;
	stream->uncore = stream->engine->gt->uncore;

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = perf->oa_formats[props->oa_format].size;

	stream->sample_flags = props->sample_flags;
	stream->sample_size += format_size;

	stream->oa_buffer.format_size = format_size;
	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
		return -EINVAL;

	stream->hold_preemption = props->hold_preemption;

	stream->oa_buffer.format =
		perf->oa_formats[props->oa_format].format;

	stream->periodic = props->oa_periodic;
	if (stream->periodic)
		stream->period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret) {
			drm_dbg(&stream->perf->i915->drm,
				"Invalid context id to filter with\n");
			return ret;
		}
	}

	ret = alloc_noa_wait(stream);
	if (ret) {
		drm_dbg(&stream->perf->i915->drm,
			"Unable to allocate NOA wait batch buffer\n");
		goto err_noa_wait_alloc;
	}

	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
	if (!stream->oa_config) {
		drm_dbg(&stream->perf->i915->drm,
			"Invalid OA config id=%i\n", props->metrics_set);
		ret = -EINVAL;
		goto err_config;
	}

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 *   In our case we are expecting that taking pm + FORCEWAKE
	 *   references will effectively disable RC6.
	 */
	intel_engine_pm_get(stream->engine);
	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(stream);
	if (ret)
		goto err_oa_buf_alloc;

	stream->ops = &i915_oa_stream_ops;

	perf->sseu = props->sseu;
	WRITE_ONCE(perf->exclusive_stream, stream);

	ret = i915_perf_stream_enable_sync(stream);
	if (ret) {
		drm_dbg(&stream->perf->i915->drm,
			"Unable to enable metric set\n");
		goto err_enable;
	}

	drm_dbg(&stream->perf->i915->drm,
		"opening stream oa config uuid=%s\n",
		stream->oa_config->uuid);

	hrtimer_init(&stream->poll_check_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stream->poll_check_timer.function = oa_poll_check_timer_cb;
	init_waitqueue_head(&stream->poll_wq);
	spin_lock_init(&stream->oa_buffer.ptr_lock);

	return 0;

err_enable:
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

err_oa_buf_alloc:
	free_oa_configs(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

err_config:
	free_noa_wait(stream);

err_noa_wait_alloc:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}
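
/*
 * Minimal userspace sketch of opening an OA stream (illustrative only;
 * `metrics_set` is assumed to be a valid config id advertised under the
 * sysfs metrics/ directory, and the format/exponent are arbitrary):
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */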

void i915_oa_init_reg_state(const struct intel_context *ce,
			    const struct intel_engine_cs *engine)
{
	struct i915_perf_stream *stream;

	if (engine->class != RENDER_CLASS)
		return;

	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
		gen8_update_reg_state_unlocked(ce, stream);
}

/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to i915_perf_read_locked() and
 * &i915_perf_stream_ops->read, but to save stream implementations (of
 * which we might have multiple later) from each handling blocking reads,
 * we do that here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	size_t offset = 0;
	int ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering, since we only wait
		 * until the OA buffer has >= 1 report, we don't immediately
		 * know whether any reports really belong to the current
		 * context.
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&perf->lock);
			ret = stream->ops->read(stream, buf, count, &offset);
			mutex_unlock(&perf->lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&perf->lock);
		ret = stream->ops->read(stream, buf, count, &offset);
		mutex_unlock(&perf->lock);
	}

	/* We allow the poll checking to sometimes report false positive EPOLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a EPOLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another EPOLLIN event.
	 * The exception to this is if ops->read() returned -ENOSPC which means
	 * that more OA data is available than could fit in the user provided
	 * buffer. In this case we want the next poll() call to not block.
	 */
	if (ret != -ENOSPC)
		stream->pollin = false;

	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
	return offset ?: (ret ?: -EAGAIN);
}
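
/*
 * Hedged read-side sketch (illustration only; process_oa_report() is a
 * hypothetical userspace helper): the byte stream returned by read() is
 * a sequence of records, each starting with a
 * struct drm_i915_perf_record_header:
 *
 *	char buf[16 * 4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	for (ssize_t off = 0; off < len;) {
 *		struct drm_i915_perf_record_header *hdr =
 *			(struct drm_i915_perf_record_header *)(buf + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(hdr + 1);
 *		off += hdr->size;
 *	}
 */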
3109
oa_poll_check_timer_cb(struct hrtimer * hrtimer)3110 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3111 {
3112 struct i915_perf_stream *stream =
3113 container_of(hrtimer, typeof(*stream), poll_check_timer);
3114
3115 if (oa_buffer_check_unlocked(stream)) {
3116 stream->pollin = true;
3117 wake_up(&stream->poll_wq);
3118 }
3119
3120 hrtimer_forward_now(hrtimer,
3121 ns_to_ktime(stream->poll_oa_period));
3122
3123 return HRTIMER_RESTART;
3124 }
3125
3126 /**
3127 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3128 * @stream: An i915 perf stream
3129 * @file: An i915 perf stream file
3130 * @wait: poll() state table
3131 *
3132 * For handling userspace polling on an i915 perf stream, this calls through to
3133 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3134 * will be woken for new stream data.
3135 *
3136 * Note: The &perf->lock mutex has been taken to serialize
3137 * with any non-file-operation driver hooks.
3138 *
3139 * Returns: any poll events that are ready without sleeping
3140 */
i915_perf_poll_locked(struct i915_perf_stream * stream,struct file * file,poll_table * wait)3141 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3142 struct file *file,
3143 poll_table *wait)
3144 {
3145 __poll_t events = 0;
3146
3147 stream->ops->poll_wait(stream, file, wait);
3148
3149 /* Note: we don't explicitly check whether there's something to read
3150 * here since this path may be very hot depending on what else
3151 * userspace is polling, or on the timeout in use. We rely solely on
3152 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3153 * samples to read.
3154 */
3155 if (stream->pollin)
3156 events |= EPOLLIN;
3157
3158 return events;
3159 }
3160
3161 /**
3162 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3163 * @file: An i915 perf stream file
3164 * @wait: poll() state table
3165 *
3166 * For handling userspace polling on an i915 perf stream, this ensures
3167 * poll_wait() gets called with a wait queue that will be woken for new stream
3168 * data.
3169 *
3170 * Note: Implementation deferred to i915_perf_poll_locked()
3171 *
3172 * Returns: any poll events that are ready without sleeping
3173 */
3174 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3175 {
3176 struct i915_perf_stream *stream = file->private_data;
3177 struct i915_perf *perf = stream->perf;
3178 __poll_t ret;
3179
3180 mutex_lock(&perf->lock);
3181 ret = i915_perf_poll_locked(stream, file, wait);
3182 mutex_unlock(&perf->lock);
3183
3184 return ret;
3185 }
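/*
 * Illustrative userspace sketch (not part of the driver) showing how the
 * hrtimer-driven pollin state above is typically consumed. It assumes a
 * stream fd already opened via DRM_IOCTL_I915_PERF_OPEN; the helper name
 * is ours, not uapi.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static ssize_t wait_and_read(int stream_fd, void *buf, size_t len)
 *	{
 *		struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *		// Blocks until oa_poll_check_timer_cb() sees buffered data
 *		// and wakes the stream's poll_wq (checked at most every
 *		// poll_oa_period nanoseconds).
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *
 *		// A false-positive EPOLLIN is possible; read() then fails
 *		// with EAGAIN and the caller should simply poll() again.
 *		return read(stream_fd, buf, len);
 *	}
 */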
3186
3187 /**
3188 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3189 * @stream: A disabled i915 perf stream
3190 *
3191 * [Re]enables the associated capture of data for this stream.
3192 *
3193 * If a stream was previously enabled then there's currently no intention
3194 * to provide userspace any guarantee about the preservation of previously
3195 * buffered data.
3196 */
3197 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3198 {
3199 if (stream->enabled)
3200 return;
3201
3202 /* Allow stream->ops->enable() to refer to this */
3203 stream->enabled = true;
3204
3205 if (stream->ops->enable)
3206 stream->ops->enable(stream);
3207
3208 if (stream->hold_preemption)
3209 intel_context_set_nopreempt(stream->pinned_ctx);
3210 }
3211
3212 /**
3213 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3214 * @stream: An enabled i915 perf stream
3215 *
3216 * Disables the associated capture of data for this stream.
3217 *
3218 * The intention is that disabling and re-enabling a stream will ideally be
3219 * cheaper than destroying and re-opening a stream with the same configuration,
3220 * though there are no formal guarantees about what state or buffered data
3221 * must be retained between disabling and re-enabling a stream.
3222 *
3223 * Note: while a stream is disabled it's considered an error for userspace
3224 * to attempt to read from the stream (-EIO).
3225 */
3226 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3227 {
3228 if (!stream->enabled)
3229 return;
3230
3231 /* Allow stream->ops->disable() to refer to this */
3232 stream->enabled = false;
3233
3234 if (stream->hold_preemption)
3235 intel_context_clear_nopreempt(stream->pinned_ctx);
3236
3237 if (stream->ops->disable)
3238 stream->ops->disable(stream);
3239 }
3240
3241 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3242 unsigned long metrics_set)
3243 {
3244 struct i915_oa_config *config;
3245 long ret = stream->oa_config->id;
3246
3247 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3248 if (!config)
3249 return -EINVAL;
3250
3251 if (config != stream->oa_config) {
3252 int err;
3253
3254 /*
3255 * If OA is bound to a specific context, emit the
3256 * reconfiguration inline from that context. The update
3257 * will then be ordered with respect to submission on that
3258 * context.
3259 *
3260 * When set globally, we use a low priority kernel context,
3261 * so it will effectively take effect when idle.
3262 */
3263 err = emit_oa_config(stream, config, oa_context(stream), NULL);
3264 if (!err)
3265 config = xchg(&stream->oa_config, config);
3266 else
3267 ret = err;
3268 }
3269
3270 i915_oa_config_put(config);
3271
3272 return ret;
3273 }
3274
3275 /**
3276 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3277 * @stream: An i915 perf stream
3278 * @cmd: the ioctl request
3279 * @arg: the ioctl data
3280 *
3281 * Note: The &perf->lock mutex has been taken to serialize
3282 * with any non-file-operation driver hooks.
3283 *
3284 * Returns: zero on success or a negative error code. Returns -EINVAL for
3285 * an unknown ioctl request.
3286 */
3287 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3288 unsigned int cmd,
3289 unsigned long arg)
3290 {
3291 switch (cmd) {
3292 case I915_PERF_IOCTL_ENABLE:
3293 i915_perf_enable_locked(stream);
3294 return 0;
3295 case I915_PERF_IOCTL_DISABLE:
3296 i915_perf_disable_locked(stream);
3297 return 0;
3298 case I915_PERF_IOCTL_CONFIG:
3299 return i915_perf_config_locked(stream, arg);
3300 }
3301
3302 return -EINVAL;
3303 }
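/*
 * Illustrative userspace sketch (not part of the driver) driving the three
 * stream ioctls handled above; assumes the uapi i915_drm.h header is on the
 * include path and that metrics_set_id is a config ID previously returned
 * by DRM_IOCTL_I915_PERF_ADD_CONFIG (or read back from sysfs).
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int pause_and_reconfigure(int stream_fd, long metrics_set_id)
 *	{
 *		if (ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
 *			return -1;
 *
 *		// On success this returns the ID of the previous config.
 *		if (ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id) < 0)
 *			return -1;
 *
 *		return ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	}
 */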
3304
3305 /**
3306 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3307 * @file: An i915 perf stream file
3308 * @cmd: the ioctl request
3309 * @arg: the ioctl data
3310 *
3311 * Implementation deferred to i915_perf_ioctl_locked().
3312 *
3313 * Returns: zero on success or a negative error code. Returns -EINVAL for
3314 * an unknown ioctl request.
3315 */
3316 static long i915_perf_ioctl(struct file *file,
3317 unsigned int cmd,
3318 unsigned long arg)
3319 {
3320 struct i915_perf_stream *stream = file->private_data;
3321 struct i915_perf *perf = stream->perf;
3322 long ret;
3323
3324 mutex_lock(&perf->lock);
3325 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3326 mutex_unlock(&perf->lock);
3327
3328 return ret;
3329 }
3330
3331 /**
3332 * i915_perf_destroy_locked - destroy an i915 perf stream
3333 * @stream: An i915 perf stream
3334 *
3335 * Frees all resources associated with the given i915 perf @stream, disabling
3336 * any associated data capture in the process.
3337 *
3338 * Note: The &perf->lock mutex has been taken to serialize
3339 * with any non-file-operation driver hooks.
3340 */
3341 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3342 {
3343 if (stream->enabled)
3344 i915_perf_disable_locked(stream);
3345
3346 if (stream->ops->destroy)
3347 stream->ops->destroy(stream);
3348
3349 if (stream->ctx)
3350 i915_gem_context_put(stream->ctx);
3351
3352 kfree(stream);
3353 }
3354
3355 /**
3356 * i915_perf_release - handles userspace close() of a stream file
3357 * @inode: anonymous inode associated with file
3358 * @file: An i915 perf stream file
3359 *
3360 * Cleans up any resources associated with an open i915 perf stream file.
3361 *
3362 * NB: close() can't really fail from the userspace point of view.
3363 *
3364 * Returns: zero on success or a negative error code.
3365 */
3366 static int i915_perf_release(struct inode *inode, struct file *file)
3367 {
3368 struct i915_perf_stream *stream = file->private_data;
3369 struct i915_perf *perf = stream->perf;
3370
3371 mutex_lock(&perf->lock);
3372 i915_perf_destroy_locked(stream);
3373 mutex_unlock(&perf->lock);
3374
3375 /* Release the reference the perf stream kept on the driver. */
3376 drm_dev_put(&perf->i915->drm);
3377
3378 return 0;
3379 }
3380
3381
3382 static const struct file_operations fops = {
3383 .owner = THIS_MODULE,
3384 .llseek = no_llseek,
3385 .release = i915_perf_release,
3386 .poll = i915_perf_poll,
3387 .read = i915_perf_read,
3388 .unlocked_ioctl = i915_perf_ioctl,
3389	/* Our ioctls have no arguments, so it's safe to use the same function
3390	 * to handle 32-bit compatibility.
3391 */
3392 .compat_ioctl = i915_perf_ioctl,
3393 };
3394
3395
3396 /**
3397 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3398 * @perf: i915 perf instance
3399 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3400 * @props: individually validated u64 property value pairs
3401 * @file: drm file
3402 *
3403 * See i915_perf_ioctl_open() for interface details.
3404 *
3405 * Implements further stream config validation and stream initialization on
3406 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3407 * taken to serialize with any non-file-operation driver hooks.
3408 *
3409 * Note: at this point the @props have only been validated in isolation and
3410 * it's still necessary to validate that the combination of properties makes
3411 * sense.
3412 *
3413 * In the case where userspace is interested in OA unit metrics then further
3414 * config validation and stream initialization details will be handled by
3415 * i915_oa_stream_init(). The code here should only validate config state that
3416 * will be relevant to all stream types / backends.
3417 *
3418 * Returns: zero on success or a negative error code.
3419 */
3420 static int
3421 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3422 struct drm_i915_perf_open_param *param,
3423 struct perf_open_properties *props,
3424 struct drm_file *file)
3425 {
3426 struct i915_gem_context *specific_ctx = NULL;
3427 struct i915_perf_stream *stream = NULL;
3428 unsigned long f_flags = 0;
3429 bool privileged_op = true;
3430 int stream_fd;
3431 int ret;
3432
3433 if (props->single_context) {
3434 u32 ctx_handle = props->ctx_handle;
3435 struct drm_i915_file_private *file_priv = file->driver_priv;
3436
3437 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3438 if (IS_ERR(specific_ctx)) {
3439 drm_dbg(&perf->i915->drm,
3440 "Failed to look up context with ID %u for opening perf stream\n",
3441 ctx_handle);
3442 ret = PTR_ERR(specific_ctx);
3443 goto err;
3444 }
3445 }
3446
3447 /*
3448 * On Haswell the OA unit supports clock gating off for a specific
3449 * context and in this mode there's no visibility of metrics for the
3450 * rest of the system, which we consider acceptable for a
3451 * non-privileged client.
3452 *
3453 * For Gen8->11 the OA unit no longer supports clock gating off for a
3454 * specific context and the kernel can't securely stop the counters
3455 * from updating as system-wide / global values. Even though we can
3456 * filter reports based on the included context ID we can't block
3457 * clients from seeing the raw / global counter values via
3458 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3459 * enable the OA unit by default.
3460 *
3461 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3462 * per context basis. So we can relax requirements there if the user
3463 * doesn't request global stream access (i.e. query based sampling
3464	 * using MI_REPORT_PERF_COUNT).
3465 */
3466 if (IS_HASWELL(perf->i915) && specific_ctx)
3467 privileged_op = false;
3468 else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3469 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3470 privileged_op = false;
3471
3472 if (props->hold_preemption) {
3473 if (!props->single_context) {
3474 drm_dbg(&perf->i915->drm,
3475 "preemption disable with no context\n");
3476 ret = -EINVAL;
3477 goto err;
3478 }
3479 privileged_op = true;
3480 }
3481
3482 /*
3483	 * Asking for SSEU configuration is a privileged operation.
3484 */
3485 if (props->has_sseu)
3486 privileged_op = true;
3487 else
3488 get_default_sseu_config(&props->sseu, props->engine);
3489
3490 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3491 * we check a dev.i915.perf_stream_paranoid sysctl option
3492 * to determine if it's ok to access system wide OA counters
3493 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3494 */
3495 if (privileged_op &&
3496 i915_perf_stream_paranoid && !perfmon_capable()) {
3497 drm_dbg(&perf->i915->drm,
3498 "Insufficient privileges to open i915 perf stream\n");
3499 ret = -EACCES;
3500 goto err_ctx;
3501 }
3502
3503 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3504 if (!stream) {
3505 ret = -ENOMEM;
3506 goto err_ctx;
3507 }
3508
3509 stream->perf = perf;
3510 stream->ctx = specific_ctx;
3511 stream->poll_oa_period = props->poll_oa_period;
3512
3513 ret = i915_oa_stream_init(stream, param, props);
3514 if (ret)
3515 goto err_alloc;
3516
3517	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3518	 * so that _stream_init can check the combination of sample flags more
3519	 * thoroughly; still, this is the expected result at this point.
3520 */
3521 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3522 ret = -ENODEV;
3523 goto err_flags;
3524 }
3525
3526 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3527 f_flags |= O_CLOEXEC;
3528 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3529 f_flags |= O_NONBLOCK;
3530
3531 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3532 if (stream_fd < 0) {
3533 ret = stream_fd;
3534 goto err_flags;
3535 }
3536
3537 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3538 i915_perf_enable_locked(stream);
3539
3540 /* Take a reference on the driver that will be kept with stream_fd
3541 * until its release.
3542 */
3543 drm_dev_get(&perf->i915->drm);
3544
3545 return stream_fd;
3546
3547 err_flags:
3548 if (stream->ops->destroy)
3549 stream->ops->destroy(stream);
3550 err_alloc:
3551 kfree(stream);
3552 err_ctx:
3553 if (specific_ctx)
3554 i915_gem_context_put(specific_ctx);
3555 err:
3556 return ret;
3557 }
3558
3559 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3560 {
3561 return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
3562 2ULL << exponent);
3563 }
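/*
 * The OA unit samples every (2 << exponent) timestamp ticks, so the period
 * is 2^(exponent + 1) / gt_clock_frequency. For example, assuming Haswell's
 * 12.5MHz (80ns) timestamp clock, exponent 0 yields the 160ns minimum
 * period mentioned in read_properties_unlocked() below.
 */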
3564
3565 static __always_inline bool
3566 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3567 {
3568 return test_bit(format, perf->format_mask);
3569 }
3570
3571 static __always_inline void
3572 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3573 {
3574 __set_bit(format, perf->format_mask);
3575 }
3576
3577 /**
3578 * read_properties_unlocked - validate + copy userspace stream open properties
3579 * @perf: i915 perf instance
3580 * @uprops: The array of u64 key value pairs given by userspace
3581 * @n_props: The number of key value pairs expected in @uprops
3582 * @props: The stream configuration built up while validating properties
3583 *
3584 * Note this function only validates properties in isolation; it doesn't
3585 * validate that the combination of properties makes sense or that all
3586 * properties necessary for a particular kind of stream have been set.
3587 *
3588 * Note that there currently aren't any ordering requirements for properties so
3589 * we shouldn't validate or assume anything about ordering here. This doesn't
3590 * rule out defining new properties with ordering requirements in the future.
3591 */
3592 static int read_properties_unlocked(struct i915_perf *perf,
3593 u64 __user *uprops,
3594 u32 n_props,
3595 struct perf_open_properties *props)
3596 {
3597 u64 __user *uprop = uprops;
3598 u32 i;
3599 int ret;
3600
3601 memset(props, 0, sizeof(struct perf_open_properties));
3602 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3603
3604 if (!n_props) {
3605 drm_dbg(&perf->i915->drm,
3606 "No i915 perf properties given\n");
3607 return -EINVAL;
3608 }
3609
3610 /* At the moment we only support using i915-perf on the RCS. */
3611 props->engine = intel_engine_lookup_user(perf->i915,
3612 I915_ENGINE_CLASS_RENDER,
3613 0);
3614 if (!props->engine) {
3615 drm_dbg(&perf->i915->drm,
3616 "No RENDER-capable engines\n");
3617 return -EINVAL;
3618 }
3619
3620 /* Considering that ID = 0 is reserved and assuming that we don't
3621 * (currently) expect any configurations to ever specify duplicate
3622 * values for a particular property ID then the last _PROP_MAX value is
3623 * one greater than the maximum number of properties we expect to get
3624 * from userspace.
3625 */
3626 if (n_props >= DRM_I915_PERF_PROP_MAX) {
3627 drm_dbg(&perf->i915->drm,
3628 "More i915 perf properties specified than exist\n");
3629 return -EINVAL;
3630 }
3631
3632 for (i = 0; i < n_props; i++) {
3633 u64 oa_period, oa_freq_hz;
3634 u64 id, value;
3635
3636 ret = get_user(id, uprop);
3637 if (ret)
3638 return ret;
3639
3640 ret = get_user(value, uprop + 1);
3641 if (ret)
3642 return ret;
3643
3644 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3645 drm_dbg(&perf->i915->drm,
3646 "Unknown i915 perf property ID\n");
3647 return -EINVAL;
3648 }
3649
3650 switch ((enum drm_i915_perf_property_id)id) {
3651 case DRM_I915_PERF_PROP_CTX_HANDLE:
3652 props->single_context = 1;
3653 props->ctx_handle = value;
3654 break;
3655 case DRM_I915_PERF_PROP_SAMPLE_OA:
3656 if (value)
3657 props->sample_flags |= SAMPLE_OA_REPORT;
3658 break;
3659 case DRM_I915_PERF_PROP_OA_METRICS_SET:
3660 if (value == 0) {
3661 drm_dbg(&perf->i915->drm,
3662 "Unknown OA metric set ID\n");
3663 return -EINVAL;
3664 }
3665 props->metrics_set = value;
3666 break;
3667 case DRM_I915_PERF_PROP_OA_FORMAT:
3668 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3669 drm_dbg(&perf->i915->drm,
3670 "Out-of-range OA report format %llu\n",
3671 value);
3672 return -EINVAL;
3673 }
3674 if (!oa_format_valid(perf, value)) {
3675 drm_dbg(&perf->i915->drm,
3676 "Unsupported OA report format %llu\n",
3677 value);
3678 return -EINVAL;
3679 }
3680 props->oa_format = value;
3681 break;
3682 case DRM_I915_PERF_PROP_OA_EXPONENT:
3683 if (value > OA_EXPONENT_MAX) {
3684 drm_dbg(&perf->i915->drm,
3685 "OA timer exponent too high (> %u)\n",
3686 OA_EXPONENT_MAX);
3687 return -EINVAL;
3688 }
3689
3690 /* Theoretically we can program the OA unit to sample
3691 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3692 * for BXT. We don't allow such high sampling
3693			 * frequencies by default without CAP_PERFMON/CAP_SYS_ADMIN.
3694 */
3695
3696 BUILD_BUG_ON(sizeof(oa_period) != 8);
3697 oa_period = oa_exponent_to_ns(perf, value);
3698
3699 /* This check is primarily to ensure that oa_period <=
3700 * UINT32_MAX (before passing to do_div which only
3701 * accepts a u32 denominator), but we can also skip
3702 * checking anything < 1Hz which implicitly can't be
3703 * limited via an integer oa_max_sample_rate.
3704 */
3705 if (oa_period <= NSEC_PER_SEC) {
3706 u64 tmp = NSEC_PER_SEC;
3707 do_div(tmp, oa_period);
3708 oa_freq_hz = tmp;
3709 } else
3710 oa_freq_hz = 0;
3711
3712 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3713 drm_dbg(&perf->i915->drm,
3714 "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3715 i915_oa_max_sample_rate);
3716 return -EACCES;
3717 }
3718
3719 props->oa_periodic = true;
3720 props->oa_period_exponent = value;
3721 break;
3722 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3723 props->hold_preemption = !!value;
3724 break;
3725 case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3726 struct drm_i915_gem_context_param_sseu user_sseu;
3727
3728 if (copy_from_user(&user_sseu,
3729 u64_to_user_ptr(value),
3730 sizeof(user_sseu))) {
3731 drm_dbg(&perf->i915->drm,
3732 "Unable to copy global sseu parameter\n");
3733 return -EFAULT;
3734 }
3735
3736 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3737 if (ret) {
3738 drm_dbg(&perf->i915->drm,
3739 "Invalid SSEU configuration\n");
3740 return ret;
3741 }
3742 props->has_sseu = true;
3743 break;
3744 }
3745 case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3746 if (value < 100000 /* 100us */) {
3747 drm_dbg(&perf->i915->drm,
3748 "OA availability timer too small (%lluns < 100us)\n",
3749 value);
3750 return -EINVAL;
3751 }
3752 props->poll_oa_period = value;
3753 break;
3754 case DRM_I915_PERF_PROP_MAX:
3755 MISSING_CASE(id);
3756 return -EINVAL;
3757 }
3758
3759 uprop += 2;
3760 }
3761
3762 return 0;
3763 }
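/*
 * Illustrative userspace sketch (not part of the driver) building the
 * (key, value) property array validated above and opening a stream. The
 * OA format and exponent are arbitrary example values; without a context
 * handle this is a system-wide stream, so by default it needs CAP_PERFMON
 * (or dev.i915.perf_stream_paranoid=0).
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int open_oa_stream(int drm_fd, __u64 metrics_set_id)
 *	{
 *		__u64 props[] = {
 *			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *			DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *			DRM_I915_PERF_PROP_OA_FORMAT,
 *				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *		};
 *		struct drm_i915_perf_open_param param = {
 *			.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *			.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *			.properties_ptr = (__u64)(uintptr_t)props,
 *		};
 *
 *		// Returns a new stream fd on success.
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	}
 */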
3764
3765 /**
3766 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3767 * @dev: drm device
3768 * @data: ioctl data copied from userspace (unvalidated)
3769 * @file: drm file
3770 *
3771 * Validates the stream open parameters given by userspace including flags
3772 * and an array of u64 key, value pair properties.
3773 *
3774 * Very little is assumed up front about the nature of the stream being
3775 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3776 * i915-perf stream is expected to be a suitable interface for other forms of
3777 * buffered data written by the GPU besides periodic OA metrics.
3778 *
3779 * Note we copy the properties from userspace outside of the i915 perf
3780 * mutex to avoid an awkward lockdep with mmap_lock.
3781 *
3782 * Most of the implementation details are handled by
3783 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3784 * mutex for serializing with any non-file-operation driver hooks.
3785 *
3786 * Return: A newly opened i915 Perf stream file descriptor or negative
3787 * error code on failure.
3788 */
3789 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3790 struct drm_file *file)
3791 {
3792 struct i915_perf *perf = &to_i915(dev)->perf;
3793 struct drm_i915_perf_open_param *param = data;
3794 struct perf_open_properties props;
3795 u32 known_open_flags;
3796 int ret;
3797
3798 if (!perf->i915)
3799 return -ENOTSUPP;
3800
3801 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3802 I915_PERF_FLAG_FD_NONBLOCK |
3803 I915_PERF_FLAG_DISABLED;
3804 if (param->flags & ~known_open_flags) {
3805 drm_dbg(&perf->i915->drm,
3806 "Unknown drm_i915_perf_open_param flag\n");
3807 return -EINVAL;
3808 }
3809
3810 ret = read_properties_unlocked(perf,
3811 u64_to_user_ptr(param->properties_ptr),
3812 param->num_properties,
3813 &props);
3814 if (ret)
3815 return ret;
3816
3817 mutex_lock(&perf->lock);
3818 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3819 mutex_unlock(&perf->lock);
3820
3821 return ret;
3822 }
3823
3824 /**
3825 * i915_perf_register - exposes i915-perf to userspace
3826 * @i915: i915 device instance
3827 *
3828 * In particular OA metric sets are advertised under a sysfs metrics/
3829 * directory allowing userspace to enumerate valid IDs that can be
3830 * used to open an i915-perf stream.
3831 */
3832 void i915_perf_register(struct drm_i915_private *i915)
3833 {
3834 struct i915_perf *perf = &i915->perf;
3835
3836 if (!perf->i915)
3837 return;
3838
3839	/* Take perf->lock to be sure we're synchronized with an attempted
3840	 * i915_perf_open_ioctl(), considering that we register after
3841	 * being exposed to userspace.
3842 */
3843 mutex_lock(&perf->lock);
3844
3845 perf->metrics_kobj =
3846 kobject_create_and_add("metrics",
3847 &i915->drm.primary->kdev->kobj);
3848
3849 mutex_unlock(&perf->lock);
3850 }
3851
3852 /**
3853 * i915_perf_unregister - hide i915-perf from userspace
3854 * @i915: i915 device instance
3855 *
3856 * i915-perf state cleanup is split up into an 'unregister' and
3857 * 'deinit' phase where the interface is first hidden from
3858 * userspace by i915_perf_unregister() before cleaning up
3859 * remaining state in i915_perf_fini().
3860 */
3861 void i915_perf_unregister(struct drm_i915_private *i915)
3862 {
3863 struct i915_perf *perf = &i915->perf;
3864
3865 if (!perf->metrics_kobj)
3866 return;
3867
3868 kobject_put(perf->metrics_kobj);
3869 perf->metrics_kobj = NULL;
3870 }
3871
3872 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3873 {
3874 static const i915_reg_t flex_eu_regs[] = {
3875 EU_PERF_CNTL0,
3876 EU_PERF_CNTL1,
3877 EU_PERF_CNTL2,
3878 EU_PERF_CNTL3,
3879 EU_PERF_CNTL4,
3880 EU_PERF_CNTL5,
3881 EU_PERF_CNTL6,
3882 };
3883 int i;
3884
3885 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3886 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3887 return true;
3888 }
3889 return false;
3890 }
3891
3892 #define ADDR_IN_RANGE(addr, start, end) \
3893 ((addr) >= (start) && \
3894 (addr) <= (end))
3895
3896 #define REG_IN_RANGE(addr, start, end) \
3897 ((addr) >= i915_mmio_reg_offset(start) && \
3898 (addr) <= i915_mmio_reg_offset(end))
3899
3900 #define REG_EQUAL(addr, mmio) \
3901 ((addr) == i915_mmio_reg_offset(mmio))
3902
3903 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3904 {
3905 return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3906 REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3907 REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3908 }
3909
3910 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3911 {
3912 return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3913 REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3914 REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3915 REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3916 }
3917
3918 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3919 {
3920 return gen7_is_valid_mux_addr(perf, addr) ||
3921 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3922 REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3923 }
3924
3925 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3926 {
3927 return gen8_is_valid_mux_addr(perf, addr) ||
3928 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3929 REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3930 }
3931
3932 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3933 {
3934 return gen7_is_valid_mux_addr(perf, addr) ||
3935 ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3936 REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3937 REG_EQUAL(addr, HSW_MBVID2_MISR0);
3938 }
3939
3940 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3941 {
3942 return gen7_is_valid_mux_addr(perf, addr) ||
3943 ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3944 }
3945
3946 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3947 {
3948 return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3949 REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3950 REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3951 REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3952 REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3953 REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3954 REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3955 }
3956
3957 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3958 {
3959 return REG_EQUAL(addr, NOA_WRITE) ||
3960 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3961 REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3962 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3963 REG_EQUAL(addr, RPM_CONFIG0) ||
3964 REG_EQUAL(addr, RPM_CONFIG1) ||
3965 REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3966 }
3967
3968 static u32 mask_reg_value(u32 reg, u32 val)
3969 {
3970	/* HALF_SLICE_CHICKEN2 is programmed with the
3971 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3972 * programmed by userspace doesn't change this.
3973 */
3974 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3975 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3976
3977	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3978 * indicated by its name and a bunch of selection fields used by OA
3979 * configs.
3980 */
3981 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3982 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3983
3984 return val;
3985 }
3986
3987 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3988 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3989 u32 __user *regs,
3990 u32 n_regs)
3991 {
3992 struct i915_oa_reg *oa_regs;
3993 int err;
3994 u32 i;
3995
3996 if (!n_regs)
3997 return NULL;
3998
3999 /* No is_valid function means we're not allowing any register to be programmed. */
4000 GEM_BUG_ON(!is_valid);
4001 if (!is_valid)
4002 return ERR_PTR(-EINVAL);
4003
4004 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4005 if (!oa_regs)
4006 return ERR_PTR(-ENOMEM);
4007
4008 for (i = 0; i < n_regs; i++) {
4009 u32 addr, value;
4010
4011 err = get_user(addr, regs);
4012 if (err)
4013 goto addr_err;
4014
4015 if (!is_valid(perf, addr)) {
4016 drm_dbg(&perf->i915->drm,
4017 "Invalid oa_reg address: %X\n", addr);
4018 err = -EINVAL;
4019 goto addr_err;
4020 }
4021
4022 err = get_user(value, regs + 1);
4023 if (err)
4024 goto addr_err;
4025
4026 oa_regs[i].addr = _MMIO(addr);
4027 oa_regs[i].value = mask_reg_value(addr, value);
4028
4029 regs += 2;
4030 }
4031
4032 return oa_regs;
4033
4034 addr_err:
4035 kfree(oa_regs);
4036 return ERR_PTR(err);
4037 }
4038
4039 static ssize_t show_dynamic_id(struct kobject *kobj,
4040 struct kobj_attribute *attr,
4041 char *buf)
4042 {
4043 struct i915_oa_config *oa_config =
4044 container_of(attr, typeof(*oa_config), sysfs_metric_id);
4045
4046 return sprintf(buf, "%d\n", oa_config->id);
4047 }
4048
4049 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4050 struct i915_oa_config *oa_config)
4051 {
4052 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4053 oa_config->sysfs_metric_id.attr.name = "id";
4054 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4055 oa_config->sysfs_metric_id.show = show_dynamic_id;
4056 oa_config->sysfs_metric_id.store = NULL;
4057
4058 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4059 oa_config->attrs[1] = NULL;
4060
4061 oa_config->sysfs_metric.name = oa_config->uuid;
4062 oa_config->sysfs_metric.attrs = oa_config->attrs;
4063
4064 return sysfs_create_group(perf->metrics_kobj,
4065 &oa_config->sysfs_metric);
4066 }
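/*
 * The group created above appears under the card's metrics directory, e.g.
 * /sys/class/drm/card0/metrics/<uuid>/id, which is where userspace reads
 * back the config ID to pass as DRM_I915_PERF_PROP_OA_METRICS_SET.
 */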
4067
4068 /**
4069 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4070 * @dev: drm device
4071 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4072 * userspace (unvalidated)
4073 * @file: drm file
4074 *
4075 * Validates the submitted OA registers to be saved into a new OA config that
4076 * can then be used for programming the OA unit and its NOA network.
4077 *
4078 * Returns: A new allocated config number to be used with the perf open ioctl
4079 * or a negative error code on failure.
4080 */
4081 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4082 struct drm_file *file)
4083 {
4084 struct i915_perf *perf = &to_i915(dev)->perf;
4085 struct drm_i915_perf_oa_config *args = data;
4086 struct i915_oa_config *oa_config, *tmp;
4087 struct i915_oa_reg *regs;
4088 int err, id;
4089
4090 if (!perf->i915)
4091 return -ENOTSUPP;
4092
4093 if (!perf->metrics_kobj) {
4094 drm_dbg(&perf->i915->drm,
4095 "OA metrics weren't advertised via sysfs\n");
4096 return -EINVAL;
4097 }
4098
4099 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4100 drm_dbg(&perf->i915->drm,
4101 "Insufficient privileges to add i915 OA config\n");
4102 return -EACCES;
4103 }
4104
4105 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4106 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4107 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4108 drm_dbg(&perf->i915->drm,
4109 "No OA registers given\n");
4110 return -EINVAL;
4111 }
4112
4113 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4114 if (!oa_config) {
4115 drm_dbg(&perf->i915->drm,
4116 "Failed to allocate memory for the OA config\n");
4117 return -ENOMEM;
4118 }
4119
4120 oa_config->perf = perf;
4121 kref_init(&oa_config->ref);
4122
4123 if (!uuid_is_valid(args->uuid)) {
4124 drm_dbg(&perf->i915->drm,
4125 "Invalid uuid format for OA config\n");
4126 err = -EINVAL;
4127 goto reg_err;
4128 }
4129
4130	/* Last character in oa_config->uuid will be 0 because oa_config was
4131	 * kzalloc'd.
4132 */
4133 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4134
4135 oa_config->mux_regs_len = args->n_mux_regs;
4136 regs = alloc_oa_regs(perf,
4137 perf->ops.is_valid_mux_reg,
4138 u64_to_user_ptr(args->mux_regs_ptr),
4139 args->n_mux_regs);
4140
4141 if (IS_ERR(regs)) {
4142 drm_dbg(&perf->i915->drm,
4143 "Failed to create OA config for mux_regs\n");
4144 err = PTR_ERR(regs);
4145 goto reg_err;
4146 }
4147 oa_config->mux_regs = regs;
4148
4149 oa_config->b_counter_regs_len = args->n_boolean_regs;
4150 regs = alloc_oa_regs(perf,
4151 perf->ops.is_valid_b_counter_reg,
4152 u64_to_user_ptr(args->boolean_regs_ptr),
4153 args->n_boolean_regs);
4154
4155 if (IS_ERR(regs)) {
4156 drm_dbg(&perf->i915->drm,
4157 "Failed to create OA config for b_counter_regs\n");
4158 err = PTR_ERR(regs);
4159 goto reg_err;
4160 }
4161 oa_config->b_counter_regs = regs;
4162
4163 if (GRAPHICS_VER(perf->i915) < 8) {
4164 if (args->n_flex_regs != 0) {
4165 err = -EINVAL;
4166 goto reg_err;
4167 }
4168 } else {
4169 oa_config->flex_regs_len = args->n_flex_regs;
4170 regs = alloc_oa_regs(perf,
4171 perf->ops.is_valid_flex_reg,
4172 u64_to_user_ptr(args->flex_regs_ptr),
4173 args->n_flex_regs);
4174
4175 if (IS_ERR(regs)) {
4176 drm_dbg(&perf->i915->drm,
4177 "Failed to create OA config for flex_regs\n");
4178 err = PTR_ERR(regs);
4179 goto reg_err;
4180 }
4181 oa_config->flex_regs = regs;
4182 }
4183
4184 err = mutex_lock_interruptible(&perf->metrics_lock);
4185 if (err)
4186 goto reg_err;
4187
4188 /* We shouldn't have too many configs, so this iteration shouldn't be
4189 * too costly.
4190 */
4191 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4192 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4193 drm_dbg(&perf->i915->drm,
4194 "OA config already exists with this uuid\n");
4195 err = -EADDRINUSE;
4196 goto sysfs_err;
4197 }
4198 }
4199
4200 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4201 if (err) {
4202 drm_dbg(&perf->i915->drm,
4203 "Failed to create sysfs entry for OA config\n");
4204 goto sysfs_err;
4205 }
4206
4207	/* Config id 0 is invalid, id 1 is reserved for the kernel's stored test config. */
4208 oa_config->id = idr_alloc(&perf->metrics_idr,
4209 oa_config, 2,
4210 0, GFP_KERNEL);
4211 if (oa_config->id < 0) {
4212 drm_dbg(&perf->i915->drm,
4213 "Failed to create sysfs entry for OA config\n");
4214 err = oa_config->id;
4215 goto sysfs_err;
4216 }
4217 id = oa_config->id;
4218
4219 drm_dbg(&perf->i915->drm,
4220 "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4221 mutex_unlock(&perf->metrics_lock);
4222
4223 return id;
4224
4225 sysfs_err:
4226 mutex_unlock(&perf->metrics_lock);
4227 reg_err:
4228 i915_oa_config_put(oa_config);
4229 drm_dbg(&perf->i915->drm,
4230 "Failed to add new OA config\n");
4231 return err;
4232 }
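/*
 * Illustrative userspace sketch (not part of the driver) registering a
 * config through the ioctl above. The UUID is a placeholder and mux_regs
 * must hold (address, value) pairs that pass the per-platform validators;
 * like opening a paranoid stream, this needs CAP_PERFMON by default.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int add_oa_config(int drm_fd, const __u32 *mux_regs, __u32 n_mux)
 *	{
 *		struct drm_i915_perf_oa_config config = {};
 *
 *		memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *		       sizeof(config.uuid));
 *		config.n_mux_regs = n_mux;
 *		config.mux_regs_ptr = (__u64)(uintptr_t)mux_regs;
 *
 *		// Returns the new config ID (>= 2) on success.
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	}
 */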
4233
4234 /**
4235 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4236 * @dev: drm device
4237 * @data: ioctl data (pointer to u64 integer) copied from userspace
4238 * @file: drm file
4239 *
4240 * Configs can be removed while being used; they will stop appearing in sysfs
4241 * and their content will be freed when the stream using the config is closed.
4242 *
4243 * Returns: 0 on success or a negative error code on failure.
4244 */
4245 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4246 struct drm_file *file)
4247 {
4248 struct i915_perf *perf = &to_i915(dev)->perf;
4249 u64 *arg = data;
4250 struct i915_oa_config *oa_config;
4251 int ret;
4252
4253 if (!perf->i915)
4254 return -ENOTSUPP;
4255
4256 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4257 drm_dbg(&perf->i915->drm,
4258 "Insufficient privileges to remove i915 OA config\n");
4259 return -EACCES;
4260 }
4261
4262 ret = mutex_lock_interruptible(&perf->metrics_lock);
4263 if (ret)
4264 return ret;
4265
4266 oa_config = idr_find(&perf->metrics_idr, *arg);
4267 if (!oa_config) {
4268 drm_dbg(&perf->i915->drm,
4269 "Failed to remove unknown OA config\n");
4270 ret = -ENOENT;
4271 goto err_unlock;
4272 }
4273
4274 GEM_BUG_ON(*arg != oa_config->id);
4275
4276 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4277
4278 idr_remove(&perf->metrics_idr, *arg);
4279
4280 mutex_unlock(&perf->metrics_lock);
4281
4282 drm_dbg(&perf->i915->drm,
4283 "Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4284
4285 i915_oa_config_put(oa_config);
4286
4287 return 0;
4288
4289 err_unlock:
4290 mutex_unlock(&perf->metrics_lock);
4291 return ret;
4292 }
4293
4294 static struct ctl_table oa_table[] = {
4295 {
4296 .procname = "perf_stream_paranoid",
4297 .data = &i915_perf_stream_paranoid,
4298 .maxlen = sizeof(i915_perf_stream_paranoid),
4299 .mode = 0644,
4300 .proc_handler = proc_dointvec_minmax,
4301 .extra1 = SYSCTL_ZERO,
4302 .extra2 = SYSCTL_ONE,
4303 },
4304 {
4305 .procname = "oa_max_sample_rate",
4306 .data = &i915_oa_max_sample_rate,
4307 .maxlen = sizeof(i915_oa_max_sample_rate),
4308 .mode = 0644,
4309 .proc_handler = proc_dointvec_minmax,
4310 .extra1 = SYSCTL_ZERO,
4311 .extra2 = &oa_sample_rate_hard_limit,
4312 },
4313 {}
4314 };
4315
4316 static struct ctl_table i915_root[] = {
4317 {
4318 .procname = "i915",
4319 .maxlen = 0,
4320 .mode = 0555,
4321 .child = oa_table,
4322 },
4323 {}
4324 };
4325
4326 static struct ctl_table dev_root[] = {
4327 {
4328 .procname = "dev",
4329 .maxlen = 0,
4330 .mode = 0555,
4331 .child = i915_root,
4332 },
4333 {}
4334 };
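/*
 * The tables above surface as dev.i915.perf_stream_paranoid and
 * dev.i915.oa_max_sample_rate, e.g. (as root):
 *
 *	sysctl dev.i915.perf_stream_paranoid=0
 *
 * to allow unprivileged access to system-wide OA metrics.
 */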
4335
4336 static void oa_init_supported_formats(struct i915_perf *perf)
4337 {
4338 struct drm_i915_private *i915 = perf->i915;
4339 enum intel_platform platform = INTEL_INFO(i915)->platform;
4340
4341 switch (platform) {
4342 case INTEL_HASWELL:
4343		oa_format_add(perf, I915_OA_FORMAT_A13);
4345 oa_format_add(perf, I915_OA_FORMAT_A29);
4346 oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4347 oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4348 oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4349 oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4350 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4351 break;
4352
4353 case INTEL_BROADWELL:
4354 case INTEL_CHERRYVIEW:
4355 case INTEL_SKYLAKE:
4356 case INTEL_BROXTON:
4357 case INTEL_KABYLAKE:
4358 case INTEL_GEMINILAKE:
4359 case INTEL_COFFEELAKE:
4360 case INTEL_COMETLAKE:
4361 case INTEL_ICELAKE:
4362 case INTEL_ELKHARTLAKE:
4363 case INTEL_JASPERLAKE:
4364 case INTEL_TIGERLAKE:
4365 case INTEL_ROCKETLAKE:
4366 case INTEL_DG1:
4367 case INTEL_ALDERLAKE_S:
4368 case INTEL_ALDERLAKE_P:
4369 oa_format_add(perf, I915_OA_FORMAT_A12);
4370 oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4371 oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4372 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4373 break;
4374
4375 default:
4376 MISSING_CASE(platform);
4377 }
4378 }
4379
4380 /**
4381 * i915_perf_init - initialize i915-perf state on module bind
4382 * @i915: i915 device instance
4383 *
4384 * Initializes i915-perf state without exposing anything to userspace.
4385 *
4386 * Note: i915-perf initialization is split into an 'init' and 'register'
4387 * phase with the i915_perf_register() exposing state to userspace.
4388 */
4389 void i915_perf_init(struct drm_i915_private *i915)
4390 {
4391 struct i915_perf *perf = &i915->perf;
4392
4393 /* XXX const struct i915_perf_ops! */
4394
4395 perf->oa_formats = oa_formats;
4396 if (IS_HASWELL(i915)) {
4397 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4398 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4399 perf->ops.is_valid_flex_reg = NULL;
4400 perf->ops.enable_metric_set = hsw_enable_metric_set;
4401 perf->ops.disable_metric_set = hsw_disable_metric_set;
4402 perf->ops.oa_enable = gen7_oa_enable;
4403 perf->ops.oa_disable = gen7_oa_disable;
4404 perf->ops.read = gen7_oa_read;
4405 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4406 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4407		/* Note that although we could theoretically also support the
4408		 * legacy ringbuffer mode on BDW (and earlier iterations of
4409		 * this driver, before upstreaming, did this) it didn't seem
4410		 * worth the complexity to maintain now that BDW+ enables
4411		 * execlist mode by default.
4412 */
4413 perf->ops.read = gen8_oa_read;
4414
4415 if (IS_GRAPHICS_VER(i915, 8, 9)) {
4416 perf->ops.is_valid_b_counter_reg =
4417 gen7_is_valid_b_counter_addr;
4418 perf->ops.is_valid_mux_reg =
4419 gen8_is_valid_mux_addr;
4420 perf->ops.is_valid_flex_reg =
4421 gen8_is_valid_flex_addr;
4422
4423 if (IS_CHERRYVIEW(i915)) {
4424 perf->ops.is_valid_mux_reg =
4425 chv_is_valid_mux_addr;
4426 }
4427
4428 perf->ops.oa_enable = gen8_oa_enable;
4429 perf->ops.oa_disable = gen8_oa_disable;
4430 perf->ops.enable_metric_set = gen8_enable_metric_set;
4431 perf->ops.disable_metric_set = gen8_disable_metric_set;
4432 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4433
4434 if (GRAPHICS_VER(i915) == 8) {
4435 perf->ctx_oactxctrl_offset = 0x120;
4436 perf->ctx_flexeu0_offset = 0x2ce;
4437
4438 perf->gen8_valid_ctx_bit = BIT(25);
4439 } else {
4440 perf->ctx_oactxctrl_offset = 0x128;
4441 perf->ctx_flexeu0_offset = 0x3de;
4442
4443 perf->gen8_valid_ctx_bit = BIT(16);
4444 }
4445 } else if (GRAPHICS_VER(i915) == 11) {
4446 perf->ops.is_valid_b_counter_reg =
4447 gen7_is_valid_b_counter_addr;
4448 perf->ops.is_valid_mux_reg =
4449 gen11_is_valid_mux_addr;
4450 perf->ops.is_valid_flex_reg =
4451 gen8_is_valid_flex_addr;
4452
4453 perf->ops.oa_enable = gen8_oa_enable;
4454 perf->ops.oa_disable = gen8_oa_disable;
4455 perf->ops.enable_metric_set = gen8_enable_metric_set;
4456 perf->ops.disable_metric_set = gen11_disable_metric_set;
4457 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4458
4459 perf->ctx_oactxctrl_offset = 0x124;
4460 perf->ctx_flexeu0_offset = 0x78e;
4461
4462 perf->gen8_valid_ctx_bit = BIT(16);
4463 } else if (GRAPHICS_VER(i915) == 12) {
4464 perf->ops.is_valid_b_counter_reg =
4465 gen12_is_valid_b_counter_addr;
4466 perf->ops.is_valid_mux_reg =
4467 gen12_is_valid_mux_addr;
4468 perf->ops.is_valid_flex_reg =
4469 gen8_is_valid_flex_addr;
4470
4471 perf->ops.oa_enable = gen12_oa_enable;
4472 perf->ops.oa_disable = gen12_oa_disable;
4473 perf->ops.enable_metric_set = gen12_enable_metric_set;
4474 perf->ops.disable_metric_set = gen12_disable_metric_set;
4475 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4476
4477 perf->ctx_flexeu0_offset = 0;
4478 perf->ctx_oactxctrl_offset = 0x144;
4479 }
4480 }
4481
4482 if (perf->ops.enable_metric_set) {
4483 mutex_init(&perf->lock);
4484
4485 /* Choose a representative limit */
4486 oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
4487
4488 mutex_init(&perf->metrics_lock);
4489 idr_init_base(&perf->metrics_idr, 1);
4490
4491 /* We set up some ratelimit state to potentially throttle any
4492 * _NOTES about spurious, invalid OA reports which we don't
4493 * forward to userspace.
4494 *
4495 * We print a _NOTE about any throttling when closing the
4496 * stream instead of waiting until driver _fini which no one
4497 * would ever see.
4498 *
4499 * Using the same limiting factors as printk_ratelimit()
4500 */
4501 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4502 /* Since we use a DRM_NOTE for spurious reports it would be
4503 * inconsistent to let __ratelimit() automatically print a
4504 * warning for throttling.
4505 */
4506 ratelimit_set_flags(&perf->spurious_report_rs,
4507 RATELIMIT_MSG_ON_RELEASE);
4508
4509 ratelimit_state_init(&perf->tail_pointer_race,
4510 5 * HZ, 10);
4511 ratelimit_set_flags(&perf->tail_pointer_race,
4512 RATELIMIT_MSG_ON_RELEASE);
4513
4514 atomic64_set(&perf->noa_programming_delay,
4515 500 * 1000 /* 500us */);
4516
4517 perf->i915 = i915;
4518
4519 oa_init_supported_formats(perf);
4520 }
4521 }
4522
4523 static int destroy_config(int id, void *p, void *data)
4524 {
4525 i915_oa_config_put(p);
4526 return 0;
4527 }
4528
4529 int i915_perf_sysctl_register(void)
4530 {
4531 sysctl_header = register_sysctl_table(dev_root);
4532 return 0;
4533 }
4534
4535 void i915_perf_sysctl_unregister(void)
4536 {
4537 unregister_sysctl_table(sysctl_header);
4538 }
4539
4540 /**
4541 * i915_perf_fini - Counter part to i915_perf_init()
4542 * @i915: i915 device instance
4543 */
4544 void i915_perf_fini(struct drm_i915_private *i915)
4545 {
4546 struct i915_perf *perf = &i915->perf;
4547
4548 if (!perf->i915)
4549 return;
4550
4551 idr_for_each(&perf->metrics_idr, destroy_config, perf);
4552 idr_destroy(&perf->metrics_idr);
4553
4554 memset(&perf->ops, 0, sizeof(perf->ops));
4555 perf->i915 = NULL;
4556 }
4557
4558 /**
4559 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4560 *
4561 * This version number is used by userspace to detect available features.
4562 */
4563 int i915_perf_ioctl_version(void)
4564 {
4565 /*
4566 * 1: Initial version
4567 * I915_PERF_IOCTL_ENABLE
4568 * I915_PERF_IOCTL_DISABLE
4569 *
4570 * 2: Added runtime modification of OA config.
4571 * I915_PERF_IOCTL_CONFIG
4572 *
4573 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4574 * preemption on a particular context so that performance data is
4575 * accessible from a delta of MI_RPC reports without looking at the
4576 * OA buffer.
4577 *
4578 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4579 * be run for the duration of the performance recording based on
4580 * their SSEU configuration.
4581 *
4582 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4583 * interval for the hrtimer used to check for OA data.
4584 */
4585 return 5;
4586 }
4587
4588 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4589 #include "selftests/i915_perf.c"
4590 #endif
4591