/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
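
/*
 * Example (userspace sketch, not part of the driver): opening a periodic,
 * system-wide OA stream using the uapi described above. Property and record
 * definitions come from uapi/drm/i915_drm.h; metrics_set_id, the exponent
 * value and drm_fd are placeholders and error handling is elided.
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() for records framed by
 * struct drm_i915_perf_record_header (see the example after
 * append_oa_sample() below).
 */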

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might revisit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we periodically forwarded data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast, and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"

/* HW requires this to be a power of two, between 128K and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
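
/*
 * A worked example of the OA_TAKEN() modular arithmetic (illustrative
 * comment only): the power-of-two mask makes the subtraction wrap correctly
 * when the tail has wrapped around the circular buffer while the head hasn't
 * yet. With OA_BUFFER_SIZE = 16M (0x1000000):
 *
 *	OA_TAKEN(0x80, 0x40)     == 0x40  (no wraparound)
 *	OA_TAKEN(0x40, 0xffffc0) == 0x80  (tail wrapped past the end)
 */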

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering POLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
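
/*
 * For reference, a sketch of how an exponent maps to a sampling period
 * (this mirrors the exponent-to-nanoseconds conversion done when validating
 * the DRM_I915_PERF_PROP_OA_EXPONENT property elsewhere in this file; treat
 * it as illustrative rather than a HW-specified formula):
 *
 *	period_ns = NSEC_PER_SEC * 2^(exponent + 1) / timestamp_frequency
 *
 * e.g. with the 12.5MHz Haswell timestamp frequency, exponent 0 gives the
 * minimum 160ns period mentioned below, and OA_EXPONENT_MAX (31) gives a
 * period of roughly 343 seconds.
 */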

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR() values if a config was only
	 * partially created, so only kfree() pointers that aren't errors
	 * (kfree(NULL) is a harmless no-op). Note: a !PTR_ERR() check here
	 * would only be true for NULL and would leak every valid allocation.
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is
	 * currently a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
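
/*
 * Example (userspace sketch, not part of the driver): records returned by
 * read() are framed by struct drm_i915_perf_record_header and walked using
 * the header's size field. handle_oa_report() is a hypothetical callback;
 * with only DRM_I915_PERF_PROP_SAMPLE_OA requested, the raw OA report
 * directly follows each sample header.
 *
 *	uint8_t data[4096];
 *	ssize_t len = read(stream_fd, data, sizeof(data));
 *	ssize_t offset = 0;
 *
 *	while (offset + sizeof(struct drm_i915_perf_record_header) <= len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(data + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_oa_report((const uint8_t *)(header + 1));
 *
 *		offset += header->size;
 *	}
 */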

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		/*
		 * XXX: Just keep the lower 21 bits for now since I'm not
		 * entirely sure if the HW touches any of the higher bits in
		 * this field
		 */
		ctx_id = report32[2] & 0x1fffff;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context, it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT commands
		 * and can itself subtract/ignore the progress of counters
		 * associated with other contexts. Note that the hardware
		 * automatically triggers reports when switching to a new
		 * context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    OA_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}
1051 /**
1052  * gen7_oa_read - copy status records then buffered OA reports
1053  * @stream: An i915-perf stream opened for OA metrics
1054  * @buf: destination buffer given by userspace
1055  * @count: the number of bytes userspace wants to read
1056  * @offset: (inout): the current position for writing into @buf
1057  *
1058  * Checks Gen 7 specific OA unit status registers and if necessary appends
1059  * corresponding status records for userspace (such as for a buffer full
1060  * condition) and then initiate appending any buffered OA reports.
1061  *
1062  * Updates @offset according to the number of bytes successfully copied into
1063  * the userspace buffer.
1064  *
1065  * Returns: zero on success or a negative error code
1066  */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}
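
/*
 * Example (userspace sketch, not part of the driver): waiting for data with
 * poll() before issuing a non-blocking read() on a stream fd opened with
 * I915_PERF_FLAG_FD_NONBLOCK. As noted above, wakeups may occasionally be
 * false positives, in which case read() fails with EAGAIN.
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t len = read(stream_fd, data, sizeof(data));
 *
 *		if (len < 0 && errno == EAGAIN) {
 *			... nothing was ready after all; poll() again ...
 *		}
 *	}
 */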

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915.enable_execlists) {
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);


		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}
1250 
1251 /**
1252  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1253  * @stream: An i915-perf stream opened for OA metrics
1254  *
1255  * In case anything needed doing to ensure the context HW ID would remain valid
1256  * for the lifetime of the stream, then that can be undone here.
1257  */
oa_put_render_ctx_id(struct i915_perf_stream * stream)1258 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1259 {
1260 	struct drm_i915_private *dev_priv = stream->dev_priv;
1261 
1262 	if (i915.enable_execlists) {
1263 		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
1264 	} else {
1265 		struct intel_engine_cs *engine = dev_priv->engine[RCS];
1266 
1267 		mutex_lock(&dev_priv->drm.struct_mutex);
1268 
1269 		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
1270 		engine->context_unpin(engine, stream->ctx);
1271 
1272 		mutex_unlock(&dev_priv->drm.struct_mutex);
1273 	}
1274 }
1275 
1276 static void
free_oa_buffer(struct drm_i915_private * i915)1277 free_oa_buffer(struct drm_i915_private *i915)
1278 {
1279 	mutex_lock(&i915->drm.struct_mutex);
1280 
1281 	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
1282 	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
1283 	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
1284 
1285 	i915->perf.oa.oa_buffer.vma = NULL;
1286 	i915->perf.oa.oa_buffer.vaddr = NULL;
1287 
1288 	mutex_unlock(&i915->drm.struct_mutex);
1289 }
1290 
i915_oa_stream_destroy(struct i915_perf_stream * stream)1291 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1292 {
1293 	struct drm_i915_private *dev_priv = stream->dev_priv;
1294 
1295 	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
1296 
1297 	/*
1298 	 * Unset exclusive_stream first, it will be checked while disabling
1299 	 * the metric set on gen8+.
1300 	 */
1301 	mutex_lock(&dev_priv->drm.struct_mutex);
1302 	dev_priv->perf.oa.exclusive_stream = NULL;
1303 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
1304 	mutex_unlock(&dev_priv->drm.struct_mutex);
1305 
1306 	free_oa_buffer(dev_priv);
1307 
1308 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1309 	intel_runtime_pm_put(dev_priv);
1310 
1311 	if (stream->ctx)
1312 		oa_put_render_ctx_id(stream);
1313 
1314 	put_oa_config(dev_priv, stream->oa_config);
1315 
1316 	if (dev_priv->perf.oa.spurious_report_rs.missed) {
1317 		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1318 			 dev_priv->perf.oa.spurious_report_rs.missed);
1319 	}
1320 }
1321 
gen7_init_oa_buffer(struct drm_i915_private * dev_priv)1322 static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
1323 {
1324 	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
1325 	unsigned long flags;
1326 
1327 	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1328 
1329 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1330 	 * before OASTATUS1, but after OASTATUS2
1331 	 */
1332 	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
1333 	dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1334 
1335 	I915_WRITE(GEN7_OABUFFER, gtt_offset);
1336 
1337 	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
1338 
1339 	/* Mark that we need updated tail pointers to read from... */
1340 	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1341 	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1342 
1343 	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1344 
1345 	/* On Haswell we have to track which OASTATUS1 flags we've
1346 	 * already seen since they can't be cleared while periodic
1347 	 * sampling is enabled.
1348 	 */
1349 	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
1350 
1351 	/* NB: although the OA buffer will initially be allocated
1352 	 * zeroed via shmfs (and so this memset is redundant when
1353 	 * first allocating), we may re-init the OA buffer, either
1354 	 * when re-enabling a stream or in error/reset paths.
1355 	 *
1356 	 * The reason we clear the buffer for each re-init is for the
1357 	 * sanity check in gen7_append_oa_reports() that looks at the
1358 	 * report-id field to make sure it's non-zero which relies on
1359 	 * the assumption that new reports are being written to zeroed
1360 	 * memory...
1361 	 */
1362 	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1363 
1364 	/* Maybe make ->pollin per-stream state if we support multiple
1365 	 * concurrent streams in the future.
1366 	 */
1367 	dev_priv->perf.oa.pollin = false;
1368 }
1369 
1370 static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
1371 {
1372 	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
1373 	unsigned long flags;
1374 
1375 	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1376 
1377 	I915_WRITE(GEN8_OASTATUS, 0);
1378 	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
1379 	dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1380 
1381 	I915_WRITE(GEN8_OABUFFER_UDW, 0);
1382 
1383 	/*
1384 	 * PRM says:
1385 	 *
1386 	 *  "This MMIO must be set before the OATAILPTR
1387 	 *  register and after the OAHEADPTR register. This is
1388 	 *  to enable proper functionality of the overflow
1389 	 *  bit."
1390 	 */
1391 	I915_WRITE(GEN8_OABUFFER, gtt_offset |
1392 		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
1393 	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1394 
1395 	/* Mark that we need updated tail pointers to read from... */
1396 	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1397 	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1398 
1399 	/*
1400 	 * Reset state used to recognise context switches, affecting which
1401 	 * reports we will forward to userspace while filtering for a single
1402 	 * context.
1403 	 */
1404 	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
1405 
1406 	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1407 
1408 	/*
1409 	 * NB: although the OA buffer will initially be allocated
1410 	 * zeroed via shmfs (and so this memset is redundant when
1411 	 * first allocating), we may re-init the OA buffer, either
1412 	 * when re-enabling a stream or in error/reset paths.
1413 	 *
1414 	 * The reason we clear the buffer for each re-init is for the
1415 	 * sanity check in gen8_append_oa_reports() that looks at the
1416 	 * reason field to make sure it's non-zero, which relies on
1417 	 * the assumption that new reports are being written to zeroed
1418 	 * memory...
1419 	 */
1420 	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1421 
1422 	/*
1423 	 * Maybe make ->pollin per-stream state if we support multiple
1424 	 * concurrent streams in the future.
1425 	 */
1426 	dev_priv->perf.oa.pollin = false;
1427 }
1428 
1429 static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
1430 {
1431 	struct drm_i915_gem_object *bo;
1432 	struct i915_vma *vma;
1433 	int ret;
1434 
1435 	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
1436 		return -ENODEV;
1437 
1438 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1439 	if (ret)
1440 		return ret;
1441 
1442 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1443 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1444 
1445 	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
1446 	if (IS_ERR(bo)) {
1447 		DRM_ERROR("Failed to allocate OA buffer\n");
1448 		ret = PTR_ERR(bo);
1449 		goto unlock;
1450 	}
1451 
1452 	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
1453 	if (ret)
1454 		goto err_unref;
1455 
1456 	/* Pre-HSW required 512K alignment; HSW requires 16M */
1457 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1458 	if (IS_ERR(vma)) {
1459 		ret = PTR_ERR(vma);
1460 		goto err_unref;
1461 	}
1462 	dev_priv->perf.oa.oa_buffer.vma = vma;
1463 
1464 	dev_priv->perf.oa.oa_buffer.vaddr =
1465 		i915_gem_object_pin_map(bo, I915_MAP_WB);
1466 	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
1467 		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
1468 		goto err_unpin;
1469 	}
1470 
1471 	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
1472 
1473 	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
1474 			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
1475 			 dev_priv->perf.oa.oa_buffer.vaddr);
1476 
1477 	goto unlock;
1478 
1479 err_unpin:
1480 	__i915_vma_unpin(vma);
1481 
1482 err_unref:
1483 	i915_gem_object_put(bo);
1484 
1485 	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
1486 	dev_priv->perf.oa.oa_buffer.vma = NULL;
1487 
1488 unlock:
1489 	mutex_unlock(&dev_priv->drm.struct_mutex);
1490 	return ret;
1491 }
1492 
1493 static void config_oa_regs(struct drm_i915_private *dev_priv,
1494 			   const struct i915_oa_reg *regs,
1495 			   u32 n_regs)
1496 {
1497 	u32 i;
1498 
1499 	for (i = 0; i < n_regs; i++) {
1500 		const struct i915_oa_reg *reg = regs + i;
1501 
1502 		I915_WRITE(reg->addr, reg->value);
1503 	}
1504 }
1505 
1506 static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
1507 				 const struct i915_oa_config *oa_config)
1508 {
1509 	/* PRM:
1510 	 *
1511 	 * OA unit is using “crclk” for its functionality. When trunk
1512 	 * level clock gating takes place, OA clock would be gated,
1513 	 * unable to count the events from non-render clock domain.
1514 	 * Render clock gating must be disabled when OA is enabled to
1515 	 * count the events from non-render domain. Unit level clock
1516 	 * gating for RCS should also be disabled.
1517 	 */
1518 	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1519 				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
1520 	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1521 				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1522 
1523 	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1524 
1525 	/* It apparently takes a fairly long time for a new MUX
1526 	 * configuration to be applied after these register writes.
1527 	 * This delay duration was derived empirically based on the
1528 	 * render_basic config, but hopefully it covers the maximum
1529 	 * configuration latency.
1530 	 *
1531 	 * As a fallback, the checks in _append_oa_reports() to skip
1532 	 * invalid OA reports do also seem to work to discard reports
1533 	 * generated before this config has completed - albeit not
1534 	 * silently.
1535 	 *
1536 	 * Unfortunately this is essentially a magic number, since we
1537 	 * don't currently know of a reliable mechanism for predicting
1538 	 * how long the MUX config will take to apply and besides
1539 	 * seeing invalid reports we don't know of a reliable way to
1540 	 * explicitly check that the MUX config has landed.
1541 	 *
1542 	 * It's even possible we've mischaracterized the underlying
1543 	 * problem - it just seems like the simplest explanation for why
1544 	 * a delay at this location would mitigate any invalid reports.
1545 	 */
1546 	usleep_range(15000, 20000);
1547 
1548 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
1549 		       oa_config->b_counter_regs_len);
1550 
1551 	return 0;
1552 }
1553 
1554 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1555 {
1556 	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1557 				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1558 	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1559 				    GEN7_DOP_CLOCK_GATE_ENABLE));
1560 
1561 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1562 				      ~GT_NOA_ENABLE));
1563 }
1564 
1565 /*
1566  * NB: It must always remain pointer safe to run this even if the OA unit
1567  * has been disabled.
1568  *
1569  * It's fine to put out-of-date values into these per-context registers
1570  * in the case that the OA unit has been disabled.
1571  */
1572 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
1573 					   u32 *reg_state,
1574 					   const struct i915_oa_config *oa_config)
1575 {
1576 	struct drm_i915_private *dev_priv = ctx->i915;
1577 	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
1578 	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
1579 	/* The MMIO offsets for Flex EU registers aren't contiguous */
1580 	u32 flex_mmio[] = {
1581 		i915_mmio_reg_offset(EU_PERF_CNTL0),
1582 		i915_mmio_reg_offset(EU_PERF_CNTL1),
1583 		i915_mmio_reg_offset(EU_PERF_CNTL2),
1584 		i915_mmio_reg_offset(EU_PERF_CNTL3),
1585 		i915_mmio_reg_offset(EU_PERF_CNTL4),
1586 		i915_mmio_reg_offset(EU_PERF_CNTL5),
1587 		i915_mmio_reg_offset(EU_PERF_CNTL6),
1588 	};
1589 	int i;
1590 
1591 	reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1592 	reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
1593 				      GEN8_OA_TIMER_PERIOD_SHIFT) |
1594 				     (dev_priv->perf.oa.periodic ?
1595 				      GEN8_OA_TIMER_ENABLE : 0) |
1596 				     GEN8_OA_COUNTER_RESUME;
1597 
1598 	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1599 		u32 state_offset = ctx_flexeu0 + i * 2;
1600 		u32 mmio = flex_mmio[i];
1601 
1602 		/*
1603 		 * This arbitrary default will select the 'EU FPU0 Pipeline
1604 		 * Active' event. In the future it's anticipated that there
1605 		 * will be an explicit 'No Event' we can select, but not yet...
1606 		 */
1607 		u32 value = 0;
1608 
1609 		if (oa_config) {
1610 			u32 j;
1611 
1612 			for (j = 0; j < oa_config->flex_regs_len; j++) {
1613 				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1614 					value = oa_config->flex_regs[j].value;
1615 					break;
1616 				}
1617 			}
1618 		}
1619 
1620 		reg_state[state_offset] = mmio;
1621 		reg_state[state_offset+1] = value;
1622 	}
1623 }
1624 
1625 /*
1626  * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
1627  * is only used by the kernel context.
1628  */
1629 static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
1630 			       const struct i915_oa_config *oa_config)
1631 {
1632 	struct drm_i915_private *dev_priv = req->i915;
1633 	/* The MMIO offsets for Flex EU registers aren't contiguous */
1634 	u32 flex_mmio[] = {
1635 		i915_mmio_reg_offset(EU_PERF_CNTL0),
1636 		i915_mmio_reg_offset(EU_PERF_CNTL1),
1637 		i915_mmio_reg_offset(EU_PERF_CNTL2),
1638 		i915_mmio_reg_offset(EU_PERF_CNTL3),
1639 		i915_mmio_reg_offset(EU_PERF_CNTL4),
1640 		i915_mmio_reg_offset(EU_PERF_CNTL5),
1641 		i915_mmio_reg_offset(EU_PERF_CNTL6),
1642 	};
1643 	u32 *cs;
1644 	int i;
1645 
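	/* A note on the allocation below: one MI_LOAD_REGISTER_IMM header,
	 * an (addr, value) pair for OACTXCONTROL, a pair per flex EU
	 * register and a trailing MI_NOOP padding the emission to an even
	 * number of dwords: 7 * 2 + 4 in total.
	 */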
1646 	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
1647 	if (IS_ERR(cs))
1648 		return PTR_ERR(cs);
1649 
1650 	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
1651 
1652 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1653 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1654 		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
1655 		GEN8_OA_COUNTER_RESUME;
1656 
1657 	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1658 		u32 mmio = flex_mmio[i];
1659 
1660 		/*
1661 		 * This arbitrary default will select the 'EU FPU0 Pipeline
1662 		 * Active' event. In the future it's anticipated that there
1663 		 * will be an explicit 'No Event' we can select, but not
1664 		 * yet...
1665 		 */
1666 		u32 value = 0;
1667 
1668 		if (oa_config) {
1669 			u32 j;
1670 
1671 			for (j = 0; j < oa_config->flex_regs_len; j++) {
1672 				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1673 					value = oa_config->flex_regs[j].value;
1674 					break;
1675 				}
1676 			}
1677 		}
1678 
1679 		*cs++ = mmio;
1680 		*cs++ = value;
1681 	}
1682 
1683 	*cs++ = MI_NOOP;
1684 	intel_ring_advance(req, cs);
1685 
1686 	return 0;
1687 }
1688 
1689 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
1690 						 const struct i915_oa_config *oa_config)
1691 {
1692 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
1693 	struct i915_gem_timeline *timeline;
1694 	struct drm_i915_gem_request *req;
1695 	int ret;
1696 
1697 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
1698 
1699 	i915_gem_retire_requests(dev_priv);
1700 
1701 	req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
1702 	if (IS_ERR(req))
1703 		return PTR_ERR(req);
1704 
1705 	ret = gen8_emit_oa_config(req, oa_config);
1706 	if (ret) {
1707 		i915_add_request(req);
1708 		return ret;
1709 	}
1710 
1711 	/* Queue this switch after all other activity */
1712 	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
1713 		struct drm_i915_gem_request *prev;
1714 		struct intel_timeline *tl;
1715 
1716 		tl = &timeline->engine[engine->id];
1717 		prev = i915_gem_active_raw(&tl->last_request,
1718 					   &dev_priv->drm.struct_mutex);
1719 		if (prev)
1720 			i915_sw_fence_await_sw_fence_gfp(&req->submit,
1721 							 &prev->submit,
1722 							 GFP_KERNEL);
1723 	}
1724 
1725 	ret = i915_switch_context(req);
1726 	i915_add_request(req);
1727 
1728 	return ret;
1729 }
1730 
1731 /*
1732  * Manages updating the per-context aspects of the OA stream
1733  * configuration across all contexts.
1734  *
1735  * The awkward consideration here is that OACTXCONTROL controls the
1736  * exponent for periodic sampling which is primarily used for system
1737  * wide profiling where we'd like a consistent sampling period even in
1738  * the face of context switches.
1739  *
1740  * Our approach of updating the register state context (as opposed to
1741  * say using a workaround batch buffer) ensures that the hardware
1742  * won't automatically reload an out-of-date timer exponent even
1743  * transiently before a WA BB could be parsed.
1744  *
1745  * This function needs to:
1746  * - Ensure the currently running context's per-context OA state is
1747  *   updated
1748  * - Ensure that all existing contexts will have the correct per-context
1749  *   OA state if they are scheduled for use.
1750  * - Ensure any new contexts will be initialized with the correct
1751  *   per-context OA state.
1752  *
1753  * Note: it's only the RCS/Render context that has any OA state.
1754  */
1755 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1756 				       const struct i915_oa_config *oa_config)
1757 {
1758 	struct i915_gem_context *ctx;
1759 	int ret;
1760 	unsigned int wait_flags = I915_WAIT_LOCKED;
1761 
1762 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
1763 
1764 	/* Switch away from any user context. */
1765 	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
1766 	if (ret)
1767 		goto out;
1768 
1769 	/*
1770 	 * The OA register config is set up through the context image. This image
1771 	 * might be written to by the GPU on context switch (in particular on
1772 	 * lite-restore). This means we can't safely update a context's image,
1773 	 * if this context is scheduled/submitted to run on the GPU.
1774 	 *
1775 	 * We could emit the OA register config through the batch buffer but
1776 	 * this might leave a small interval of time where the OA unit is
1777 	 * configured at an invalid sampling period.
1778 	 *
1779 	 * So far the best way to work around this issue seems to be draining
1780 	 * the GPU from any submitted work.
1781 	 */
1782 	ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
1783 	if (ret)
1784 		goto out;
1785 
1786 	/* Update all contexts now that we've stalled the submission. */
1787 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1788 		struct intel_context *ce = &ctx->engine[RCS];
1789 		u32 *regs;
1790 
1791 		/* OA settings will be set upon first use */
1792 		if (!ce->state)
1793 			continue;
1794 
1795 		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1796 		if (IS_ERR(regs)) {
1797 			ret = PTR_ERR(regs);
1798 			goto out;
1799 		}
1800 
1801 		ce->state->obj->mm.dirty = true;
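		/* Skip ahead to the register state page within the logical
		 * ring context image before patching in the OA config.
		 */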
1802 		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1803 
1804 		gen8_update_reg_state_unlocked(ctx, regs, oa_config);
1805 
1806 		i915_gem_object_unpin_map(ce->state->obj);
1807 	}
1808 
1809  out:
1810 	return ret;
1811 }
1812 
1813 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1814 				  const struct i915_oa_config *oa_config)
1815 {
1816 	int ret;
1817 
1818 	/*
1819 	 * We disable slice/unslice clock ratio change reports on SKL since
1820 	 * they are too noisy. The HW generates a lot of redundant reports
1821 	 * where the ratio hasn't really changed, causing a lot of redundant
1822 	 * work for userspace to process and increasing the chances we'll
1823 	 * hit buffer overruns.
1824 	 *
1825 	 * Although we don't currently use the 'disable overrun' OABUFFER
1826 	 * feature, it's worth noting that clock ratio reports have to be
1827 	 * disabled before considering using that feature, since the HW doesn't
1828 	 * correctly block these reports.
1829 	 *
1830 	 * Currently none of the high-level metrics we have depend on knowing
1831 	 * this ratio to normalize.
1832 	 *
1833 	 * Note: This register is not power context saved and restored, but
1834 	 * that's OK considering that we disable RC6 while the OA unit is
1835 	 * enabled.
1836 	 *
1837 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1838 	 * be read back from automatically triggered reports, as part of the
1839 	 * RPT_ID field.
1840 	 */
1841 	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
1842 	    IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
1843 		I915_WRITE(GEN8_OA_DEBUG,
1844 			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1845 					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1846 	}
1847 
1848 	/*
1849 	 * Update all contexts prior to writing the mux configurations, as we need
1850 	 * to make sure all slices/subslices are ON before writing to NOA
1851 	 * registers.
1852 	 */
1853 	ret = gen8_configure_all_contexts(dev_priv, oa_config);
1854 	if (ret)
1855 		return ret;
1856 
1857 	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1858 
1859 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
1860 		       oa_config->b_counter_regs_len);
1861 
1862 	return 0;
1863 }
1864 
1865 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1866 {
1867 	/* Reset all contexts' slices/subslices configurations. */
1868 	gen8_configure_all_contexts(dev_priv, NULL);
1869 
1870 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1871 				      ~GT_NOA_ENABLE));
1872 
1873 }
1874 
1875 static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1876 {
1877 	/*
1878 	 * Reset buf pointers so we don't forward reports from before now.
1879 	 *
1880 	 * Think carefully if considering trying to avoid this, since it
1881 	 * also ensures status flags and the buffer itself are cleared
1882 	 * in error paths, and we have checks for invalid reports based
1883 	 * on the assumption that certain fields are written to zeroed
1884 	 * memory, which this helps maintain.
1885 	 */
1886 	gen7_init_oa_buffer(dev_priv);
1887 
1888 	if (dev_priv->perf.oa.exclusive_stream->enabled) {
1889 		struct i915_gem_context *ctx =
1890 			dev_priv->perf.oa.exclusive_stream->ctx;
1891 		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
1892 
1893 		bool periodic = dev_priv->perf.oa.periodic;
1894 		u32 period_exponent = dev_priv->perf.oa.period_exponent;
1895 		u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1896 
1897 		I915_WRITE(GEN7_OACONTROL,
1898 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
1899 			   (period_exponent <<
1900 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
1901 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
1902 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
1903 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
1904 			   GEN7_OACONTROL_ENABLE);
1905 	} else
1906 		I915_WRITE(GEN7_OACONTROL, 0);
1907 }
1908 
1909 static void gen8_oa_enable(struct drm_i915_private *dev_priv)
1910 {
1911 	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1912 
1913 	/*
1914 	 * Reset buf pointers so we don't forward reports from before now.
1915 	 *
1916 	 * Think carefully if considering trying to avoid this, since it
1917 	 * also ensures status flags and the buffer itself are cleared
1918 	 * in error paths, and we have checks for invalid reports based
1919 	 * on the assumption that certain fields are written to zeroed
1920 	 * memory, which this helps maintain.
1921 	 */
1922 	gen8_init_oa_buffer(dev_priv);
1923 
1924 	/*
1925 	 * Note: we don't rely on the hardware to perform single context
1926 	 * filtering and instead filter on the CPU based on the context-id
1927 	 * field of reports.
1928 	 */
1929 	I915_WRITE(GEN8_OACONTROL, (report_format <<
1930 				    GEN8_OA_REPORT_FORMAT_SHIFT) |
1931 				   GEN8_OA_COUNTER_ENABLE);
1932 }
1933 
1934 /**
1935  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
1936  * @stream: An i915 perf stream opened for OA metrics
1937  *
1938  * [Re]enables hardware periodic sampling according to the period configured
1939  * when opening the stream. This also starts a hrtimer that will periodically
1940  * check for data in the circular OA buffer for notifying userspace (e.g.
1941  * during a read() or poll()).
1942  */
1943 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
1944 {
1945 	struct drm_i915_private *dev_priv = stream->dev_priv;
1946 
1947 	dev_priv->perf.oa.ops.oa_enable(dev_priv);
1948 
1949 	if (dev_priv->perf.oa.periodic)
1950 		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
1951 			      ns_to_ktime(POLL_PERIOD),
1952 			      HRTIMER_MODE_REL_PINNED);
1953 }
1954 
1955 static void gen7_oa_disable(struct drm_i915_private *dev_priv)
1956 {
1957 	I915_WRITE(GEN7_OACONTROL, 0);
1958 }
1959 
1960 static void gen8_oa_disable(struct drm_i915_private *dev_priv)
1961 {
1962 	I915_WRITE(GEN8_OACONTROL, 0);
1963 }
1964 
1965 /**
1966  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
1967  * @stream: An i915 perf stream opened for OA metrics
1968  *
1969  * Stops the OA unit from periodically writing counter reports into the
1970  * circular OA buffer. This also stops the hrtimer that periodically checks for
1971  * data in the circular OA buffer, for notifying userspace.
1972  */
1973 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
1974 {
1975 	struct drm_i915_private *dev_priv = stream->dev_priv;
1976 
1977 	dev_priv->perf.oa.ops.oa_disable(dev_priv);
1978 
1979 	if (dev_priv->perf.oa.periodic)
1980 		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
1981 }
1982 
1983 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
1984 	.destroy = i915_oa_stream_destroy,
1985 	.enable = i915_oa_stream_enable,
1986 	.disable = i915_oa_stream_disable,
1987 	.wait_unlocked = i915_oa_wait_unlocked,
1988 	.poll_wait = i915_oa_poll_wait,
1989 	.read = i915_oa_read,
1990 };
1991 
1992 /**
1993  * i915_oa_stream_init - validate combined props for OA stream and init
1994  * @stream: An i915 perf stream
1995  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
1996  * @props: The property state that configures stream (individually validated)
1997  *
1998  * While read_properties_unlocked() validates properties in isolation, it
1999  * doesn't ensure that the combination necessarily makes sense.
2000  *
2001  * At this point it has been determined that userspace wants a stream of
2002  * OA metrics, but we still need to further validate that the combined
2003  * properties are OK.
2004  *
2005  * If the configuration makes sense then we can allocate memory for
2006  * a circular OA buffer and apply the requested metric set configuration.
2007  *
2008  * Returns: zero on success or a negative error code.
2009  */
2010 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2011 			       struct drm_i915_perf_open_param *param,
2012 			       struct perf_open_properties *props)
2013 {
2014 	struct drm_i915_private *dev_priv = stream->dev_priv;
2015 	int format_size;
2016 	int ret;
2017 
2018 	/* If the sysfs metrics/ directory wasn't registered for some
2019 	 * reason then don't let userspace try their luck with config
2020 	 * IDs
2021 	 */
2022 	if (!dev_priv->perf.metrics_kobj) {
2023 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2024 		return -EINVAL;
2025 	}
2026 
2027 	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
2028 		DRM_DEBUG("Only OA report sampling supported\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
2033 		DRM_DEBUG("OA unit not supported\n");
2034 		return -ENODEV;
2035 	}
2036 
2037 	/* To avoid the complexity of having to accurately filter
2038 	 * counter reports and marshal to the appropriate client
2039 	 * we currently only allow exclusive access
2040 	 */
2041 	if (dev_priv->perf.oa.exclusive_stream) {
2042 		DRM_DEBUG("OA unit already in use\n");
2043 		return -EBUSY;
2044 	}
2045 
2046 	if (!props->oa_format) {
2047 		DRM_DEBUG("OA report format not specified\n");
2048 		return -EINVAL;
2049 	}
2050 
2051 	/* We set up some ratelimit state to potentially throttle any _NOTES
2052 	 * about spurious, invalid OA reports which we don't forward to
2053 	 * userspace.
2054 	 *
2055 	 * The initialization is associated with opening the stream (not driver
2056 	 * init) considering we print a _NOTE about any throttling when closing
2057 	 * the stream instead of waiting until driver _fini which no one would
2058 	 * ever see.
2059 	 *
2060 	 * Using the same limiting factors as printk_ratelimit()
2061 	 */
2062 	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
2063 			     5 * HZ, 10);
2064 	/* Since we use a DRM_NOTE for spurious reports it would be
2065 	 * inconsistent to let __ratelimit() automatically print a warning for
2066 	 * throttling.
2067 	 */
2068 	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
2069 			    RATELIMIT_MSG_ON_RELEASE);
2070 
2071 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2072 
2073 	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
2074 
2075 	stream->sample_flags |= SAMPLE_OA_REPORT;
2076 	stream->sample_size += format_size;
2077 
2078 	dev_priv->perf.oa.oa_buffer.format_size = format_size;
2079 	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
2080 		return -EINVAL;
2081 
2082 	dev_priv->perf.oa.oa_buffer.format =
2083 		dev_priv->perf.oa.oa_formats[props->oa_format].format;
2084 
2085 	dev_priv->perf.oa.periodic = props->oa_periodic;
2086 	if (dev_priv->perf.oa.periodic)
2087 		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
2088 
2089 	if (stream->ctx) {
2090 		ret = oa_get_render_ctx_id(stream);
2091 		if (ret)
2092 			return ret;
2093 	}
2094 
2095 	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
2096 	if (ret)
2097 		goto err_config;
2098 
2099 	/* PRM - observability performance counters:
2100 	 *
2101 	 *   OACONTROL, performance counter enable, note:
2102 	 *
2103 	 *   "When this bit is set, in order to have coherent counts,
2104 	 *   RC6 power state and trunk clock gating must be disabled.
2105 	 *   This can be achieved by programming MMIO registers as
2106 	 *   0xA094=0 and 0xA090[31]=1"
2107 	 *
2108 	 *   In our case we are expecting that taking pm + FORCEWAKE
2109 	 *   references will effectively disable RC6.
2110 	 */
2111 	intel_runtime_pm_get(dev_priv);
2112 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2113 
2114 	ret = alloc_oa_buffer(dev_priv);
2115 	if (ret)
2116 		goto err_oa_buf_alloc;
2117 
2118 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2119 	if (ret)
2120 		goto err_lock;
2121 
2122 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2123 						      stream->oa_config);
2124 	if (ret)
2125 		goto err_enable;
2126 
2127 	stream->ops = &i915_oa_stream_ops;
2128 
2129 	dev_priv->perf.oa.exclusive_stream = stream;
2130 
2131 	mutex_unlock(&dev_priv->drm.struct_mutex);
2132 
2133 	return 0;
2134 
2135 err_enable:
2136 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2137 	mutex_unlock(&dev_priv->drm.struct_mutex);
2138 
2139 err_lock:
2140 	free_oa_buffer(dev_priv);
2141 
2142 err_oa_buf_alloc:
2143 	put_oa_config(dev_priv, stream->oa_config);
2144 
2145 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2146 	intel_runtime_pm_put(dev_priv);
2147 
2148 err_config:
2149 	if (stream->ctx)
2150 		oa_put_render_ctx_id(stream);
2151 
2152 	return ret;
2153 }
2154 
2155 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
2156 			    struct i915_gem_context *ctx,
2157 			    u32 *reg_state)
2158 {
2159 	struct i915_perf_stream *stream;
2160 
2161 	if (engine->id != RCS)
2162 		return;
2163 
2164 	stream = engine->i915->perf.oa.exclusive_stream;
2165 	if (stream)
2166 		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
2167 }
2168 
2169 /**
2170  * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2171  * @stream: An i915 perf stream
2172  * @file: An i915 perf stream file
2173  * @buf: destination buffer given by userspace
2174  * @count: the number of bytes userspace wants to read
2175  * @ppos: (inout) file seek position (unused)
2176  *
2177  * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2178  * ensure that if we've successfully copied any data then reporting that takes
2179  * precedence over any internal error status, so the data isn't lost.
2180  *
2181  * For example ret will be -ENOSPC whenever there is more buffered data than
2182  * can be copied to userspace, but that's only interesting if we weren't able
2183  * to copy any data, because it implies the userspace buffer is too small to
2184  * receive a single record (and we never split records).
2185  *
2186  * Another case with ret == -EFAULT is more of a grey area since it would seem
2187  * like bad form for userspace to ask us to overrun its buffer, but the user
2188  * knows best:
2189  *
2190  *   http://yarchive.net/comp/linux/partial_reads_writes.html
2191  *
2192  * Returns: The number of bytes copied or a negative error code on failure.
2193  */
2194 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2195 				     struct file *file,
2196 				     char __user *buf,
2197 				     size_t count,
2198 				     loff_t *ppos)
2199 {
2200 	/* Note we keep the offset (aka bytes read) separate from any
2201 	 * error status so that the final check for whether we return
2202 	 * the bytes read with a higher precedence than any error (see
2203 	 * comment below) doesn't need to be handled/duplicated in
2204 	 * stream->ops->read() implementations.
2205 	 */
2206 	size_t offset = 0;
2207 	int ret = stream->ops->read(stream, buf, count, &offset);
2208 
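	/* GNU ?: returns its left operand when non-zero, so bytes copied
	 * take precedence, then any ->read() error, and finally -EAGAIN
	 * if the read simply found no data.
	 */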
2209 	return offset ?: (ret ?: -EAGAIN);
2210 }
2211 
2212 /**
2213  * i915_perf_read - handles read() FOP for i915 perf stream FDs
2214  * @file: An i915 perf stream file
2215  * @buf: destination buffer given by userspace
2216  * @count: the number of bytes userspace wants to read
2217  * @ppos: (inout) file seek position (unused)
2218  *
2219  * The entry point for handling a read() on a stream file descriptor from
2220  * userspace. Most of the work is left to the i915_perf_read_locked() and
2221  * &i915_perf_stream_ops->read but to save having stream implementations (of
2222  * which we might have multiple later) we handle blocking read here.
2223  *
2224  * We can also consistently treat trying to read from a disabled stream
2225  * as an IO error so implementations can assume the stream is enabled
2226  * while reading.
2227  *
2228  * Returns: The number of bytes copied or a negative error code on failure.
2229  */
2230 static ssize_t i915_perf_read(struct file *file,
2231 			      char __user *buf,
2232 			      size_t count,
2233 			      loff_t *ppos)
2234 {
2235 	struct i915_perf_stream *stream = file->private_data;
2236 	struct drm_i915_private *dev_priv = stream->dev_priv;
2237 	ssize_t ret;
2238 
2239 	/* To ensure it's handled consistently we simply treat all reads of a
2240 	 * disabled stream as an error. In particular it might otherwise lead
2241 	 * to a deadlock for blocking file descriptors...
2242 	 */
2243 	if (!stream->enabled)
2244 		return -EIO;
2245 
2246 	if (!(file->f_flags & O_NONBLOCK)) {
2247 		/* There's the small chance of false positives from
2248 		 * stream->ops->wait_unlocked.
2249 		 *
2250 		 * E.g. with single context filtering, since we only wait until
2251 		 * the oabuffer has >= 1 report, we don't immediately know whether
2252 		 * any reports really belong to the current context.
2253 		 */
2254 		do {
2255 			ret = stream->ops->wait_unlocked(stream);
2256 			if (ret)
2257 				return ret;
2258 
2259 			mutex_lock(&dev_priv->perf.lock);
2260 			ret = i915_perf_read_locked(stream, file,
2261 						    buf, count, ppos);
2262 			mutex_unlock(&dev_priv->perf.lock);
2263 		} while (ret == -EAGAIN);
2264 	} else {
2265 		mutex_lock(&dev_priv->perf.lock);
2266 		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2267 		mutex_unlock(&dev_priv->perf.lock);
2268 	}
2269 
2270 	/* We allow the poll checking to sometimes report false positive POLLIN
2271 	 * events where we might actually report EAGAIN on read() if there's
2272 	 * not really any data available. In this situation though we don't
2273 	 * want to enter a busy loop between poll() reporting a POLLIN event
2274 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2275 	 * effectively ensures we back off until the next hrtimer callback
2276 	 * before reporting another POLLIN event.
2277 	 */
2278 	if (ret >= 0 || ret == -EAGAIN) {
2279 		/* Maybe make ->pollin per-stream state if we support multiple
2280 		 * concurrent streams in the future.
2281 		 */
2282 		dev_priv->perf.oa.pollin = false;
2283 	}
2284 
2285 	return ret;
2286 }
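
/*
 * For illustration, a minimal sketch (not part of the driver) of how
 * userspace might consume the stream. process_oa_report() is a
 * hypothetical helper and error handling is omitted, but the record
 * header and the DRM_I915_PERF_RECORD_SAMPLE type come from the uapi
 * header i915_drm.h:
 *
 *	u8 buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t offset = 0; offset < len;) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(header + 1);
 *
 *		offset += header->size;
 *	}
 */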
2287 
2288 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
2289 {
2290 	struct drm_i915_private *dev_priv =
2291 		container_of(hrtimer, typeof(*dev_priv),
2292 			     perf.oa.poll_check_timer);
2293 
2294 	if (oa_buffer_check_unlocked(dev_priv)) {
2295 		dev_priv->perf.oa.pollin = true;
2296 		wake_up(&dev_priv->perf.oa.poll_wq);
2297 	}
2298 
2299 	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
2300 
2301 	return HRTIMER_RESTART;
2302 }
2303 
2304 /**
2305  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
2306  * @dev_priv: i915 device instance
2307  * @stream: An i915 perf stream
2308  * @file: An i915 perf stream file
2309  * @wait: poll() state table
2310  *
2311  * For handling userspace polling on an i915 perf stream, this calls through to
2312  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
2313  * will be woken for new stream data.
2314  *
2315  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2316  * with any non-file-operation driver hooks.
2317  *
2318  * Returns: any poll events that are ready without sleeping
2319  */
2320 static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
2321 					  struct i915_perf_stream *stream,
2322 					  struct file *file,
2323 					  poll_table *wait)
2324 {
2325 	unsigned int events = 0;
2326 
2327 	stream->ops->poll_wait(stream, file, wait);
2328 
2329 	/* Note: we don't explicitly check whether there's something to read
2330 	 * here since this path may be very hot depending on what else
2331 	 * userspace is polling, or on the timeout in use. We rely solely on
2332 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
2333 	 * samples to read.
2334 	 */
2335 	if (dev_priv->perf.oa.pollin)
2336 		events |= POLLIN;
2337 
2338 	return events;
2339 }
2340 
2341 /**
2342  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
2343  * @file: An i915 perf stream file
2344  * @wait: poll() state table
2345  *
2346  * For handling userspace polling on an i915 perf stream, this ensures
2347  * poll_wait() gets called with a wait queue that will be woken for new stream
2348  * data.
2349  *
2350  * Note: Implementation deferred to i915_perf_poll_locked()
2351  *
2352  * Returns: any poll events that are ready without sleeping
2353  */
2354 static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
2355 {
2356 	struct i915_perf_stream *stream = file->private_data;
2357 	struct drm_i915_private *dev_priv = stream->dev_priv;
2358 	int ret;
2359 
2360 	mutex_lock(&dev_priv->perf.lock);
2361 	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
2362 	mutex_unlock(&dev_priv->perf.lock);
2363 
2364 	return ret;
2365 }
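
/*
 * Illustrative only: userspace would typically block for new data with
 * something like
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, timeout_ms);
 *
 * before issuing a read() on the stream fd.
 */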
2366 
2367 /**
2368  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
2369  * @stream: A disabled i915 perf stream
2370  *
2371  * [Re]enables the associated capture of data for this stream.
2372  *
2373  * If a stream was previously enabled then there's currently no intention
2374  * to provide userspace any guarantee about the preservation of previously
2375  * buffered data.
2376  */
2377 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2378 {
2379 	if (stream->enabled)
2380 		return;
2381 
2382 	/* Allow stream->ops->enable() to refer to this */
2383 	stream->enabled = true;
2384 
2385 	if (stream->ops->enable)
2386 		stream->ops->enable(stream);
2387 }
2388 
2389 /**
2390  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2391  * @stream: An enabled i915 perf stream
2392  *
2393  * Disables the associated capture of data for this stream.
2394  *
2395  * The intention is that disabling and re-enabling a stream will ideally be
2396  * cheaper than destroying and re-opening a stream with the same configuration,
2397  * though there are no formal guarantees about what state or buffered data
2398  * must be retained between disabling and re-enabling a stream.
2399  *
2400  * Note: while a stream is disabled it's considered an error for userspace
2401  * to attempt to read from the stream (-EIO).
2402  */
2403 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2404 {
2405 	if (!stream->enabled)
2406 		return;
2407 
2408 	/* Allow stream->ops->disable() to refer to this */
2409 	stream->enabled = false;
2410 
2411 	if (stream->ops->disable)
2412 		stream->ops->disable(stream);
2413 }
2414 
2415 /**
2416  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2417  * @stream: An i915 perf stream
2418  * @cmd: the ioctl request
2419  * @arg: the ioctl data
2420  *
2421  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2422  * with any non-file-operation driver hooks.
2423  *
2424  * Returns: zero on success or a negative error code. Returns -EINVAL for
2425  * an unknown ioctl request.
2426  */
2427 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2428 				   unsigned int cmd,
2429 				   unsigned long arg)
2430 {
2431 	switch (cmd) {
2432 	case I915_PERF_IOCTL_ENABLE:
2433 		i915_perf_enable_locked(stream);
2434 		return 0;
2435 	case I915_PERF_IOCTL_DISABLE:
2436 		i915_perf_disable_locked(stream);
2437 		return 0;
2438 	}
2439 
2440 	return -EINVAL;
2441 }
2442 
2443 /**
2444  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2445  * @file: An i915 perf stream file
2446  * @cmd: the ioctl request
2447  * @arg: the ioctl data
2448  *
2449  * Implementation deferred to i915_perf_ioctl_locked().
2450  *
2451  * Returns: zero on success or a negative error code. Returns -EINVAL for
2452  * an unknown ioctl request.
2453  */
2454 static long i915_perf_ioctl(struct file *file,
2455 			    unsigned int cmd,
2456 			    unsigned long arg)
2457 {
2458 	struct i915_perf_stream *stream = file->private_data;
2459 	struct drm_i915_private *dev_priv = stream->dev_priv;
2460 	long ret;
2461 
2462 	mutex_lock(&dev_priv->perf.lock);
2463 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
2464 	mutex_unlock(&dev_priv->perf.lock);
2465 
2466 	return ret;
2467 }
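
/*
 * Usage sketch: since neither request takes an argument, userspace can
 * simply issue
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *
 * to start and stop capture on an open stream fd.
 */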
2468 
2469 /**
2470  * i915_perf_destroy_locked - destroy an i915 perf stream
2471  * @stream: An i915 perf stream
2472  *
2473  * Frees all resources associated with the given i915 perf @stream, disabling
2474  * any associated data capture in the process.
2475  *
2476  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2477  * with any non-file-operation driver hooks.
2478  */
2479 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
2480 {
2481 	if (stream->enabled)
2482 		i915_perf_disable_locked(stream);
2483 
2484 	if (stream->ops->destroy)
2485 		stream->ops->destroy(stream);
2486 
2487 	list_del(&stream->link);
2488 
2489 	if (stream->ctx)
2490 		i915_gem_context_put(stream->ctx);
2491 
2492 	kfree(stream);
2493 }
2494 
2495 /**
2496  * i915_perf_release - handles userspace close() of a stream file
2497  * @inode: anonymous inode associated with file
2498  * @file: An i915 perf stream file
2499  *
2500  * Cleans up any resources associated with an open i915 perf stream file.
2501  *
2502  * NB: close() can't really fail from the userspace point of view.
2503  *
2504  * Returns: zero on success or a negative error code.
2505  */
2506 static int i915_perf_release(struct inode *inode, struct file *file)
2507 {
2508 	struct i915_perf_stream *stream = file->private_data;
2509 	struct drm_i915_private *dev_priv = stream->dev_priv;
2510 
2511 	mutex_lock(&dev_priv->perf.lock);
2512 	i915_perf_destroy_locked(stream);
2513 	mutex_unlock(&dev_priv->perf.lock);
2514 
2515 	return 0;
2516 }
2517 
2518 
2519 static const struct file_operations fops = {
2520 	.owner		= THIS_MODULE,
2521 	.llseek		= no_llseek,
2522 	.release	= i915_perf_release,
2523 	.poll		= i915_perf_poll,
2524 	.read		= i915_perf_read,
2525 	.unlocked_ioctl	= i915_perf_ioctl,
2526 	/* Our ioctls have no arguments, so it's safe to use the same function
2527 	 * to handle 32-bit compatibility.
2528 	 */
2529 	.compat_ioctl   = i915_perf_ioctl,
2530 };
2531 
2532 
2533 /**
2534  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
2535  * @dev_priv: i915 device instance
2536  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2537  * @props: individually validated u64 property value pairs
2538  * @file: drm file
2539  *
2540  * See i915_perf_open_ioctl() for interface details.
2541  *
2542  * Implements further stream config validation and stream initialization on
2543  * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
2544  * taken to serialize with any non-file-operation driver hooks.
2545  *
2546  * Note: at this point the @props have only been validated in isolation and
2547  * it's still necessary to validate that the combination of properties makes
2548  * sense.
2549  *
2550  * In the case where userspace is interested in OA unit metrics then further
2551  * config validation and stream initialization details will be handled by
2552  * i915_oa_stream_init(). The code here should only validate config state that
2553  * will be relevant to all stream types / backends.
2554  *
2555  * Returns: zero on success or a negative error code.
2556  */
2557 static int
2558 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
2559 			    struct drm_i915_perf_open_param *param,
2560 			    struct perf_open_properties *props,
2561 			    struct drm_file *file)
2562 {
2563 	struct i915_gem_context *specific_ctx = NULL;
2564 	struct i915_perf_stream *stream = NULL;
2565 	unsigned long f_flags = 0;
2566 	bool privileged_op = true;
2567 	int stream_fd;
2568 	int ret;
2569 
2570 	if (props->single_context) {
2571 		u32 ctx_handle = props->ctx_handle;
2572 		struct drm_i915_file_private *file_priv = file->driver_priv;
2573 
2574 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
2575 		if (!specific_ctx) {
2576 			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
2577 				  ctx_handle);
2578 			ret = -ENOENT;
2579 			goto err;
2580 		}
2581 	}
2582 
2583 	/*
2584 	 * On Haswell the OA unit supports clock gating off for a specific
2585 	 * context and in this mode there's no visibility of metrics for the
2586 	 * rest of the system, which we consider acceptable for a
2587 	 * non-privileged client.
2588 	 *
2589 	 * For Gen8+ the OA unit no longer supports clock gating off for a
2590 	 * specific context and the kernel can't securely stop the counters
2591 	 * from updating as system-wide / global values. Even though we can
2592 	 * filter reports based on the included context ID we can't block
2593 	 * clients from seeing the raw / global counter values via
2594 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
2595 	 * enable the OA unit by default.
2596 	 */
2597 	if (IS_HASWELL(dev_priv) && specific_ctx)
2598 		privileged_op = false;
2599 
2600 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
2601 	 * we check a dev.i915.perf_stream_paranoid sysctl option
2602 	 * to determine if it's ok to access system wide OA counters
2603 	 * without CAP_SYS_ADMIN privileges.
2604 	 */
2605 	if (privileged_op &&
2606 	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
2607 		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
2608 		ret = -EACCES;
2609 		goto err_ctx;
2610 	}
2611 
2612 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
2613 	if (!stream) {
2614 		ret = -ENOMEM;
2615 		goto err_ctx;
2616 	}
2617 
2618 	stream->dev_priv = dev_priv;
2619 	stream->ctx = specific_ctx;
2620 
2621 	ret = i915_oa_stream_init(stream, param, props);
2622 	if (ret)
2623 		goto err_alloc;
2624 
2625 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
2626 	 * to have _stream_init check the combination of sample flags more
2627 	 * thoroughly, but this is still the expected result at this point.
2628 	 */
2629 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
2630 		ret = -ENODEV;
2631 		goto err_flags;
2632 	}
2633 
2634 	list_add(&stream->link, &dev_priv->perf.streams);
2635 
2636 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
2637 		f_flags |= O_CLOEXEC;
2638 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
2639 		f_flags |= O_NONBLOCK;
2640 
2641 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
2642 	if (stream_fd < 0) {
2643 		ret = stream_fd;
2644 		goto err_open;
2645 	}
2646 
2647 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
2648 		i915_perf_enable_locked(stream);
2649 
2650 	return stream_fd;
2651 
2652 err_open:
2653 	list_del(&stream->link);
2654 err_flags:
2655 	if (stream->ops->destroy)
2656 		stream->ops->destroy(stream);
2657 err_alloc:
2658 	kfree(stream);
2659 err_ctx:
2660 	if (specific_ctx)
2661 		i915_gem_context_put(specific_ctx);
2662 err:
2663 	return ret;
2664 }
2665 
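/*
 * The OA sampling period is (2^(exponent + 1)) timestamp ticks, i.e.
 * 2ULL << exponent, converted here to nanoseconds. As a worked example
 * (assuming HSW's 12.5MHz OA timestamp frequency): an exponent of 0
 * selects the minimum 160ns period referred to below, and each
 * increment of the exponent doubles the period.
 */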
2666 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2667 {
2668 	return div_u64(1000000000ULL * (2ULL << exponent),
2669 		       dev_priv->perf.oa.timestamp_frequency);
2670 }
2671 
2672 /**
2673  * read_properties_unlocked - validate + copy userspace stream open properties
2674  * @dev_priv: i915 device instance
2675  * @uprops: The array of u64 key value pairs given by userspace
2676  * @n_props: The number of key value pairs expected in @uprops
2677  * @props: The stream configuration built up while validating properties
2678  *
2679  * Note this function only validates properties in isolation; it doesn't
2680  * validate that the combination of properties makes sense or that all
2681  * properties necessary for a particular kind of stream have been set.
2682  *
2683  * Note that there currently aren't any ordering requirements for properties so
2684  * we shouldn't validate or assume anything about ordering here. This doesn't
2685  * rule out defining new properties with ordering requirements in the future.
2686  */
2687 static int read_properties_unlocked(struct drm_i915_private *dev_priv,
2688 				    u64 __user *uprops,
2689 				    u32 n_props,
2690 				    struct perf_open_properties *props)
2691 {
2692 	u64 __user *uprop = uprops;
2693 	u32 i;
2694 
2695 	memset(props, 0, sizeof(struct perf_open_properties));
2696 
2697 	if (!n_props) {
2698 		DRM_DEBUG("No i915 perf properties given\n");
2699 		return -EINVAL;
2700 	}
2701 
2702 	/* Considering that ID = 0 is reserved and assuming that we don't
2703 	 * (currently) expect any configurations to ever specify duplicate
2704 	 * values for a particular property ID then the last _PROP_MAX value is
2705 	 * one greater than the maximum number of properties we expect to get
2706 	 * from userspace.
2707 	 */
2708 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
2709 		DRM_DEBUG("More i915 perf properties specified than exist\n");
2710 		return -EINVAL;
2711 	}
2712 
2713 	for (i = 0; i < n_props; i++) {
2714 		u64 oa_period, oa_freq_hz;
2715 		u64 id, value;
2716 		int ret;
2717 
2718 		ret = get_user(id, uprop);
2719 		if (ret)
2720 			return ret;
2721 
2722 		ret = get_user(value, uprop + 1);
2723 		if (ret)
2724 			return ret;
2725 
2726 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
2727 			DRM_DEBUG("Unknown i915 perf property ID\n");
2728 			return -EINVAL;
2729 		}
2730 
2731 		switch ((enum drm_i915_perf_property_id)id) {
2732 		case DRM_I915_PERF_PROP_CTX_HANDLE:
2733 			props->single_context = 1;
2734 			props->ctx_handle = value;
2735 			break;
2736 		case DRM_I915_PERF_PROP_SAMPLE_OA:
2737 			props->sample_flags |= SAMPLE_OA_REPORT;
2738 			break;
2739 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
2740 			if (value == 0) {
2741 				DRM_DEBUG("Unknown OA metric set ID\n");
2742 				return -EINVAL;
2743 			}
2744 			props->metrics_set = value;
2745 			break;
2746 		case DRM_I915_PERF_PROP_OA_FORMAT:
2747 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
2748 				DRM_DEBUG("Out-of-range OA report format %llu\n",
2749 					  value);
2750 				return -EINVAL;
2751 			}
2752 			if (!dev_priv->perf.oa.oa_formats[value].size) {
2753 				DRM_DEBUG("Unsupported OA report format %llu\n",
2754 					  value);
2755 				return -EINVAL;
2756 			}
2757 			props->oa_format = value;
2758 			break;
2759 		case DRM_I915_PERF_PROP_OA_EXPONENT:
2760 			if (value > OA_EXPONENT_MAX) {
2761 				DRM_DEBUG("OA timer exponent too high (> %u)\n",
2762 					 OA_EXPONENT_MAX);
2763 				return -EINVAL;
2764 			}
2765 
2766 			/* Theoretically we can program the OA unit to sample
2767 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
2768 			 * for BXT. We don't allow such high sampling
2769 			 * frequencies by default unless root.
2770 			 */
2771 
2772 			BUILD_BUG_ON(sizeof(oa_period) != 8);
2773 			oa_period = oa_exponent_to_ns(dev_priv, value);
2774 
2775 			/* This check is primarily to ensure that oa_period <=
2776 			 * UINT32_MAX (before passing to do_div which only
2777 			 * accepts a u32 denominator), but we can also skip
2778 			 * checking anything < 1Hz which implicitly can't be
2779 			 * limited via an integer oa_max_sample_rate.
2780 			 */
2781 			if (oa_period <= NSEC_PER_SEC) {
2782 				u64 tmp = NSEC_PER_SEC;
2783 				do_div(tmp, oa_period);
2784 				oa_freq_hz = tmp;
2785 			} else
2786 				oa_freq_hz = 0;
2787 
2788 			if (oa_freq_hz > i915_oa_max_sample_rate &&
2789 			    !capable(CAP_SYS_ADMIN)) {
2790 				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
2791 					  i915_oa_max_sample_rate);
2792 				return -EACCES;
2793 			}
2794 
2795 			props->oa_periodic = true;
2796 			props->oa_period_exponent = value;
2797 			break;
2798 		case DRM_I915_PERF_PROP_MAX:
2799 			MISSING_CASE(id);
2800 			return -EINVAL;
2801 		}
2802 
2803 		uprop += 2;
2804 	}
2805 
2806 	return 0;
2807 }
2808 
2809 /**
2810  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
2811  * @dev: drm device
2812  * @data: ioctl data copied from userspace (unvalidated)
2813  * @file: drm file
2814  *
2815  * Validates the stream open parameters given by userspace including flags
2816  * and an array of u64 key, value pair properties.
2817  *
2818  * Very little is assumed up front about the nature of the stream being
2819  * opened (for instance we don't assume it's for periodic OA unit metrics). An
2820  * i915-perf stream is expected to be a suitable interface for other forms of
2821  * buffered data written by the GPU besides periodic OA metrics.
2822  *
2823  * Note we copy the properties from userspace outside of the i915 perf
2824  * mutex to avoid an awkward lockdep with mmap_sem.
2825  *
2826  * Most of the implementation details are handled by
2827  * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
2828  * mutex for serializing with any non-file-operation driver hooks.
2829  *
2830  * Return: A newly opened i915 Perf stream file descriptor or negative
2831  * error code on failure.
2832  */
2833 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2834 			 struct drm_file *file)
2835 {
2836 	struct drm_i915_private *dev_priv = dev->dev_private;
2837 	struct drm_i915_perf_open_param *param = data;
2838 	struct perf_open_properties props;
2839 	u32 known_open_flags;
2840 	int ret;
2841 
2842 	if (!dev_priv->perf.initialized) {
2843 		DRM_DEBUG("i915 perf interface not available for this system\n");
2844 		return -ENOTSUPP;
2845 	}
2846 
2847 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
2848 			   I915_PERF_FLAG_FD_NONBLOCK |
2849 			   I915_PERF_FLAG_DISABLED;
2850 	if (param->flags & ~known_open_flags) {
2851 		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
2852 		return -EINVAL;
2853 	}
2854 
2855 	ret = read_properties_unlocked(dev_priv,
2856 				       u64_to_user_ptr(param->properties_ptr),
2857 				       param->num_properties,
2858 				       &props);
2859 	if (ret)
2860 		return ret;
2861 
2862 	mutex_lock(&dev_priv->perf.lock);
2863 	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
2864 	mutex_unlock(&dev_priv->perf.lock);
2865 
2866 	return ret;
2867 }
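
/*
 * Putting the interface together, a hedged sketch (not part of the
 * driver) of how userspace might open a periodic OA stream: metric_id
 * would be read from the sysfs metrics/ directory, the property and
 * format names come from the uapi header i915_drm.h (the report format
 * is platform dependent; A32u40_A4u32_B8_C8 is a gen8+ example), and
 * drmIoctl() is libdrm's ioctl wrapper:
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metric_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * noting that num_properties counts (key, value) pairs, not u64s.
 */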
2868 
2869 /**
2870  * i915_perf_register - exposes i915-perf to userspace
2871  * @dev_priv: i915 device instance
2872  *
2873  * In particular OA metric sets are advertised under a sysfs metrics/
2874  * directory allowing userspace to enumerate valid IDs that can be
2875  * used to open an i915-perf stream.
2876  */
2877 void i915_perf_register(struct drm_i915_private *dev_priv)
2878 {
2879 	int ret;
2880 
2881 	if (!dev_priv->perf.initialized)
2882 		return;
2883 
2884 	/* To be sure we're synchronized with an attempted
2885 	 * i915_perf_open_ioctl(), considering that we register after
2886 	 * being exposed to userspace.
2887 	 */
	mutex_lock(&dev_priv->perf.lock);

	dev_priv->perf.metrics_kobj =
		kobject_create_and_add("metrics",
				       &dev_priv->drm.primary->kdev->kobj);
	if (!dev_priv->perf.metrics_kobj)
		goto exit;

	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

	if (IS_HASWELL(dev_priv)) {
		i915_perf_load_test_config_hsw(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		i915_perf_load_test_config_bdw(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		i915_perf_load_test_config_chv(dev_priv);
	} else if (IS_SKYLAKE(dev_priv)) {
		if (IS_SKL_GT2(dev_priv))
			i915_perf_load_test_config_sklgt2(dev_priv);
		else if (IS_SKL_GT3(dev_priv))
			i915_perf_load_test_config_sklgt3(dev_priv);
		else if (IS_SKL_GT4(dev_priv))
			i915_perf_load_test_config_sklgt4(dev_priv);
	} else if (IS_BROXTON(dev_priv)) {
		i915_perf_load_test_config_bxt(dev_priv);
	} else if (IS_KABYLAKE(dev_priv)) {
		if (IS_KBL_GT2(dev_priv))
			i915_perf_load_test_config_kblgt2(dev_priv);
		else if (IS_KBL_GT3(dev_priv))
			i915_perf_load_test_config_kblgt3(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv)) {
		i915_perf_load_test_config_glk(dev_priv);
	}

	if (dev_priv->perf.oa.test_config.id == 0)
		goto sysfs_error;

	ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
				 &dev_priv->perf.oa.test_config.sysfs_metric);
	if (ret)
		goto sysfs_error;

	atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

	goto exit;

sysfs_error:
	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;

exit:
	mutex_unlock(&dev_priv->perf.lock);
}

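/*
 * Editor's illustrative sketch (not part of the driver): with the metrics/
 * directory registered, userspace can resolve a config UUID to the ID
 * expected by the open ioctl. The path below assumes the primary node is
 * card0:
 *
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *
 *	static int read_metric_set_id(const char *uuid, uint64_t *id)
 *	{
 *		char path[128];
 *		FILE *f;
 *		int ret;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/drm/card0/metrics/%s/id", uuid);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		ret = (fscanf(f, "%" SCNu64, id) == 1) ? 0 : -1;
 *		fclose(f);
 *		return ret;
 *	}
 */
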
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.metrics_kobj)
		return;

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &dev_priv->perf.oa.test_config.sysfs_metric);

	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (flex_eu_regs[i].reg == addr)
			return true;
	}
	return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
		(addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
		(addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return addr == HALF_SLICE_CHICKEN2.reg ||
		(addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
		(addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
		(addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
		addr == WAIT_FOR_RC6_EXIT.reg ||
		(addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
		(addr >= 0x25100 && addr <= 0x2FF90) ||
		addr == 0x9ec0;
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
		(addr >= 0x182300 && addr <= 0x1823A4);
}

static uint32_t mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (HALF_SLICE_CHICKEN2.reg == reg)
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name, plus a bunch of selection fields used by OA
	 * configs.
	 */
	if (WAIT_FOR_RC6_EXIT.reg == reg)
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(dev_priv, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}

static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(dev_priv->perf.metrics_kobj,
				  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	int err, id;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	atomic_set(&oa_config->ref_count, 1);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config
	 * was allocated with kzalloc().
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	oa_config->mux_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_mux_reg,
			      u64_to_user_ptr(args->mux_regs_ptr),
			      args->n_mux_regs);

	if (IS_ERR(oa_config->mux_regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(oa_config->mux_regs);
		goto reg_err;
	}

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	oa_config->b_counter_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
			      u64_to_user_ptr(args->boolean_regs_ptr),
			      args->n_boolean_regs);

	if (IS_ERR(oa_config->b_counter_regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(oa_config->b_counter_regs);
		goto reg_err;
	}

	if (INTEL_GEN(dev_priv) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		oa_config->flex_regs =
			alloc_oa_regs(dev_priv,
				      dev_priv->perf.oa.ops.is_valid_flex_reg,
				      u64_to_user_ptr(args->flex_regs_ptr),
				      args->n_flex_regs);

		if (IS_ERR(oa_config->flex_regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(oa_config->flex_regs);
			goto reg_err;
		}
	}

	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is reserved for the kernel stored
	 * test config.
	 */
	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate an ID for the OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
	put_oa_config(dev_priv, oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}

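/*
 * Editor's hypothetical userspace sketch (not part of the driver): adding a
 * dynamic OA config through this ioctl. mux_regs holds (address, value)
 * pairs, i.e. 2 * n_mux_regs u32 values, matching what alloc_oa_regs()
 * reads above; the UUID and register contents are placeholders that a real
 * tool would generate against the per-platform whitelists:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int add_oa_config(int drm_fd, const char uuid[36],
 *				 const uint32_t *mux_regs, uint32_t n_mux_regs)
 *	{
 *		struct drm_i915_perf_oa_config config = { 0 };
 *
 *		memcpy(config.uuid, uuid, sizeof(config.uuid));
 *		config.n_mux_regs = n_mux_regs;
 *		config.mux_regs_ptr = (uintptr_t)mux_regs;
 *
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	}
 *
 * A non-negative return value is the new config ID; it persists until
 * removed with the ioctl below.
 */
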
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);
	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}

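/*
 * Editor's hypothetical userspace sketch (not part of the driver): removing
 * a previously added config using the ID returned by the add ioctl:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int remove_oa_config(int drm_fd, uint64_t config_id)
 *	{
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
 *			     &config_id);
 *	}
 */
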
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &one,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	 },
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	 },
	{}
};

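/*
 * Editor's illustrative sketch (not part of the driver): the tables above
 * surface two knobs under /proc/sys/dev/i915/. A minimal C equivalent of
 * "sysctl -w dev.i915.perf_stream_paranoid=0", which an administrator might
 * use to allow unprivileged system-wide streams:
 *
 *	#include <stdio.h>
 *
 *	static int set_perf_stream_paranoid(int value)
 *	{
 *		FILE *f = fopen("/proc/sys/dev/i915/perf_stream_paranoid",
 *				"w");
 *
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%d\n", value);
 *		return fclose(f);
 *	}
 */
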
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	dev_priv->perf.oa.timestamp_frequency = 0;

	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.timestamp_frequency = 12500000;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (i915.enable_execlists) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver did, before upstreaming), it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			gen8_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg =
			gen8_is_valid_flex_addr;

		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		if (IS_GEN8(dev_priv)) {
			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

			dev_priv->perf.oa.timestamp_frequency = 12500000;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}
		} else if (IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);

			switch (dev_priv->info.platform) {
			case INTEL_BROXTON:
			case INTEL_GEMINILAKE:
				dev_priv->perf.oa.timestamp_frequency = 19200000;
				break;
			case INTEL_SKYLAKE:
			case INTEL_KABYLAKE:
				dev_priv->perf.oa.timestamp_frequency = 12000000;
				break;
			default:
				/* Leave timestamp_frequency at 0 so we can
				 * detect unsupported platforms.
				 */
				break;
			}
		}
	}

	if (dev_priv->perf.oa.timestamp_frequency) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
				CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit =
			dev_priv->perf.oa.timestamp_frequency / 2;
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}

static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}