/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <asm/hypervisor.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <linux/xarray.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"

#include "intel_region_lmem.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20200917"
#define DRIVER_TIMESTAMP	1600375437

struct drm_i915_gem_object;

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_TC1,
	HPD_PORT_TC2,
	HPD_PORT_TC3,
	HPD_PORT_TC4,
	HPD_PORT_TC5,
	HPD_PORT_TC6,

	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

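/*
 * Usage sketch (illustrative only, not part of the driver): walk every
 * valid hotplug pin. for_each_hpd_pin() deliberately starts at
 * HPD_NONE + 1, so HPD_NONE (and its alias HPD_TV) is never visited.
 */
static inline unsigned int example_count_hpd_pins(void)
{
	enum hpd_pin pin;
	unsigned int count = 0;

	for_each_hpd_pin(pin)
		count++;

	return count; /* == HPD_NUM_PINS - 1 */
}
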
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
	struct delayed_work hotplug_work;

	const u32 *hpd, *pch_hpd;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	u32 retry_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD IRQ from DP and an HPD IRQ from a non-DP
	 * source, the non-DP handler could block the workqueue while
	 * acquiring the mode config mutex, which userspace may already
	 * hold. Userspace, in turn, waits on the DP workqueue to run,
	 * which is blocked behind the non-DP one. A dedicated workqueue
	 * for DP avoids that deadlock.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	union {
		struct drm_file *file;
		struct rcu_head rcu;
	};

	struct xarray context_xa;
	struct xarray vm_xa;

	unsigned int bsd_engine;

	/*
	 * Every context ban increments the per-client ban score; hangs in
	 * short succession also increment it. Once the ban threshold is
	 * reached, the client is considered banned and any further
	 * submission will fail. This is a stop-gap measure to limit a
	 * badly behaving client's access to the GPU. Note that unbannable
	 * contexts never increment the client ban score.
	 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN	3
#define I915_CLIENT_SCORE_BANNED	9
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};
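
/*
 * Illustrative sketch (an assumption about usage, not the driver's actual
 * helper): how the thresholds above combine. A hang within
 * I915_CLIENT_FAST_HANG_JIFFIES of the previous one, or a context ban,
 * bumps ban_score; a client is treated as banned once the accumulated
 * score reaches I915_CLIENT_SCORE_BANNED.
 */
static inline bool
example_client_is_banned(const struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}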

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_cdclk_config;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe);
	int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
	int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc *crtc);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
	u8 (*calc_voltage_level)(int cdclk);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
	void (*commit_modeset_enables)(struct intel_atomic_state *state);
	void (*commit_modeset_disables)(struct intel_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*color_check)(struct intel_crtc_state *crtc_state);
	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
	void (*read_luts)(struct intel_crtc_state *crtc_state);
};

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	u32 required_version;
	u32 max_fw_size; /* bytes */
	u32 *dmc_payload;
	u32 dmc_fw_size; /* dwords */
	u32 version;
	u32 mmio_count;
	i915_reg_t mmioaddr[20];
	u32 mmiodata[20];
	u32 dc_state;
	u32 target_dc_state;
	u32 allowed_dc_mask;
	intel_wakeref_t wakeref;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool active;
	bool activated;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			u32 hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			u16 pixel_blend_mode;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
			u64 modifier;
		} fb;

		unsigned int fence_y_offset;
		u16 gen9_wa_cfb_stride;
		u16 interval;
		s8 fence_id;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
			u64 modifier;
		} fb;

		int cfb_size;
		unsigned int fence_y_offset;
		u16 gen9_wa_cfb_stride;
		u16 interval;
		s8 fence_id;
		bool plane_visible;
	} params;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID; LOW_RR is
 * the lowest eDP panel refresh rate found from EDID parsing for the same
 * resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK	0x0f
#define I915_PSR_DEBUG_DEFAULT		0x00
#define I915_PSR_DEBUG_DISABLE		0x01
#define I915_PSR_DEBUG_ENABLE		0x02
#define I915_PSR_DEBUG_FORCE_PSR1	0x03
#define I915_PSR_DEBUG_IRQ		0x10

	u32 debug;
	bool sink_support;
	bool enabled;
	struct intel_dp *dp;
	enum pipe pipe;
	enum transcoder transcoder;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
	bool sink_psr2_support;
	bool link_standby;
	bool colorimetry_support;
	bool psr2_enabled;
	bool psr2_sel_fetch_enabled;
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
	u16 su_x_granularity;
	bool dc3co_enabled;
	u32 dc3co_exit_delay;
	struct delayed_work dc3co_work;
	bool force_mode_changed;
	struct drm_dp_vsc_sdp vsc;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT  (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}

/* Number of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

struct ddi_vbt_port_info {
	/* Non-NULL if port present. */
	const struct child_device_config *child;

	int max_tmds_clock;

	/* This is an index in the HDMI/DVI DDI buffer translation table. */
	u8 hdmi_level_shift;
	u8 hdmi_level_shift_set:1;

	u8 supports_dvi:1;
	u8 supports_hdmi:1;
	u8 supports_dp:1;
	u8 supports_edp:1;
	u8 supports_typec_usb:1;
	u8 supports_tbt:1;

	u8 alternate_aux_channel;
	u8 alternate_ddc_pin;

	u8 dp_boost_level;
	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
		bool hobl;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	struct list_head display_devices;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct ilk_wm_values {
	u32 wm_pipe[3];
	u32 wm_lp[3];
	u32 wm_lp_spr[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	u16 plane[I915_MAX_PLANES];
	u16 fbc;
};

struct g4x_sr_wm {
	u16 plane;
	u16 cursor;
	u16 fbc;
};

struct vlv_wm_ddl_values {
	u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	u8 level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
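
/*
 * Sketch of a derived helper (an assumption about usage, not a driver
 * API): since 'end' is exclusive, two DDB entries overlap exactly when
 * each one starts before the other ends.
 */
static inline bool example_skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
						   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}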

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
};

struct intel_cdclk_config {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct i915_selftest_stash {
	atomic_t counter;
};

struct drm_i915_private {
	struct drm_device drm;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	u32 hsw_psr_mmio_adjust;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* The current hardware cdclk configuration */
		struct intel_cdclk_config hw;

		/* cdclk, divider, and ratio table from bspec */
		const struct intel_cdclk_vals *table;

		struct intel_global_obj obj;
	} cdclk;

	struct {
		/* The current hardware dbuf configuration */
		u8 enabled_slices;

		struct intel_global_obj obj;
	} dbuf;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;
	/* unbound hipri wq for page flips/plane updates */
	struct workqueue_struct *flip_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	spinlock_t mm_lock;

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

	/**
	 * dpll and cdclk state is protected by connection_mutex
	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms plls
	 * share registers.
	 */
	struct {
		struct mutex lock;

		int num_shared_dpll;
		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
		const struct intel_dpll_mgr *mgr;

		struct {
			int nssc;
			int ssc;
		} ref_clks;
	} dpll;

	struct list_head global_obj_list;

	/*
	 * For reading active_pipes holding any crtc lock is
	 * sufficient, for writing must hold all of them.
	 */
	u8 active_pipes;

	struct i915_wa_list gt_wa_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * HTI (aka HDPORT) state read during initial hw readout. Most
	 * platforms don't have HTI, so this will just stay 0. Those that do
	 * will use this later to figure out which PLLs and PHYs are unavailable
	 * for driver usage.
	 */
	u32 hti_state;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct mutex av_mutex;
	int audio_power_refcount;
	u32 audio_freq_cntrl;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	u32 sagv_block_time_us;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 *
		 * FIXME get rid of this.
		 */
		bool distrust_bios_wm;
	} wm;

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
		u8 num_channels;
		u8 ranks;
		u32 bandwidth_kbps;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4
		} type;
		u8 num_qgv_points;
	} dram_info;

	struct intel_bw_info {
		/* for each QGV point */
		unsigned int deratedbw[I915_NUM_QGV_POINTS];
		u8 num_qgv_points;
		u8 num_planes;
	} max_bw[6];

	struct intel_global_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;

			struct llist_head free_list;
			struct work_struct free_work;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *av_enc_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe_audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
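
/*
 * Usage sketch (illustrative only, not a driver helper): count the
 * engines exposed to userspace via the uabi rbtree. struct
 * intel_engine_cs and its uabi fields come from gt/intel_engine.h,
 * included above.
 */
static inline unsigned int example_count_uabi_engines(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int count = 0;

	for_each_uabi_engine(engine, i915)
		count++;

	return count;
}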

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
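
/*
 * Worked example (illustrative): with 8 bits per pipe, plane 2 of pipe B
 * (pipe index 1) maps to BIT(2 + 8 * 1) = BIT(10), the overlay of pipe B
 * to BIT(15), and INTEL_FRONTBUFFER_ALL_MASK(1) covers bits 15:8.
 */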

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) - 1, (s) - 1))

/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN_RANGE(dev_priv, s, e) \
	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))

#define IS_GEN(dev_priv, n) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
	 INTEL_INFO(dev_priv)->gen == (n))

#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)

/*
 * Return true if revision is in range [since,until] inclusive.
 *
 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
 */
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
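
/*
 * Restatement of the MSB trick above, for illustration (a hypothetical
 * helper, not used by the driver): shifting the mask so that bit pb, and
 * then bit s, lands in the most significant bit and testing BIT(msb) is
 * the same as testing both bits directly.
 */
static __always_inline bool
example_msb_pair_test(u32 mask, unsigned int pb, unsigned int s)
{
	const unsigned int msb = BITS_PER_TYPE(u32) - 1;

	/* Identical to: (mask & BIT(pb)) && (mask & BIT(s)) */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}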

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv)	(INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)

#define IS_CNL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_U(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)

#define IS_TGL_Y(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)

#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

enum {
	KBL_REVID_A0,
	KBL_REVID_B0,
	KBL_REVID_B1,
	KBL_REVID_C0,
	KBL_REVID_D0,
	KBL_REVID_D1,
	KBL_REVID_E0,
	KBL_REVID_F0,
	KBL_REVID_G0,
};

struct i915_rev_steppings {
	u8 gt_stepping;
	u8 disp_stepping;
};

/* Defined in intel_workarounds.c */
extern const struct i915_rev_steppings kbl_revids[];

#define IS_KBL_GT_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && \
	 kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
	 kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
#define IS_KBL_DISP_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && \
	 kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
	 kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1
#define GLK_REVID_A2		0x2
#define GLK_REVID_B0		0x3

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))

#define ICL_REVID_A0		0x0
#define ICL_REVID_A2		0x1
#define ICL_REVID_B0		0x3
#define ICL_REVID_B2		0x4
#define ICL_REVID_C0		0x5

#define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))

#define EHL_REVID_A0		0x0

#define IS_EHL_REVID(p, since, until) \
	(IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))

enum {
	TGL_REVID_A0,
	TGL_REVID_B0,
	TGL_REVID_B1,
	TGL_REVID_C0,
	TGL_REVID_D0,
};

extern const struct i915_rev_steppings tgl_uy_revids[];
extern const struct i915_rev_steppings tgl_revids[];

static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
	if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
		return tgl_uy_revids;
	else
		return tgl_revids;
}

#define IS_TGL_DISP_REVID(p, since, until) \
	(IS_TIGERLAKE(p) && \
	 tgl_revids_get(p)->disp_stepping >= (since) && \
	 tgl_revids_get(p)->disp_stepping <= (until))

#define IS_TGL_UY_GT_REVID(p, since, until) \
	((IS_TGL_U(p) || IS_TGL_Y(p)) && \
	 tgl_uy_revids->gt_stepping >= (since) && \
	 tgl_uy_revids->gt_stepping <= (until))

#define IS_TGL_GT_REVID(p, since, until) \
	(IS_TIGERLAKE(p) && \
	 !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
	 tgl_revids->gt_stepping >= (since) && \
	 tgl_revids->gt_stepping <= (until))

#define RKL_REVID_A0		0x0
#define RKL_REVID_B0		0x1
#define RKL_REVID_C0		0x4

#define IS_RKL_REVID(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_REVID(p, since, until))

#define DG1_REVID_A0		0x0
#define DG1_REVID_B0		0x1

#define IS_DG1_REVID(p, since, until) \
	(IS_DG1(p) && IS_REVID(p, since, until))

#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({		\
	unsigned int first__ = (first);				\
	unsigned int count__ = (count);				\
	((gt)->info.engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;	\
})
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
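
/*
 * Worked example (illustrative): if only VCS0 and VCS2 are present in
 * engine_mask, VDBOX_MASK(gt) yields 0b101, i.e. instance bits taken
 * relative to VCS0.
 */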

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				  IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
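
/*
 * Usage sketch (illustrative; assumes I915_GTT_PAGE_SIZE_64K from the
 * GTT headers included above): HAS_PAGE_SIZES() answers whether the
 * device supports every page size in the given set.
 */
static inline bool example_supports_64k_pages(struct drm_i915_private *i915)
{
	return HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K);
}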

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)	\
	(IS_CANNONLAKE(dev_priv) ||			\
	 IS_SKL_GT3(dev_priv) ||			\
	 IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)

#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv)	 (INTEL_GEN(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans)	 ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		return true;
#endif

	/* Running as a guest, we assume the host is enforcing VT-d */
1765 return !hypervisor_is_type(X86_HYPER_NATIVE);
1766 }

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return IS_BROXTON(dev_priv) && intel_vtd_active();
}

/* i915_drv.c */
extern const struct dev_pm_ops i915_pm_ops;

int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);

int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);

int i915_getparam_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects
	 * (along most call paths), but be a little more paranoid: freeing
	 * the objects does take a small amount of time, during which the
	 * RCU callbacks could have added new objects onto the freed list
	 * and armed the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to the objects above (see i915_gem_drain_freed_objects()),
	 * in general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to drain
	 * the workqueue a second time after waiting for the RCU grace
	 * period so that we catch work queued via RCU from the first pass.
	 * As neither drain_workqueue() nor flush_workqueue() report a
	 * result, we assume that no more than three passes are needed to
	 * catch all _recursive_ RCU delayed work.
	 */
	int pass = 3;
	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);
	drain_workqueue(i915->wq);
}
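
/*
 * Illustrative sketch (not from the original source) of the recursive
 * rearm pattern the three passes above guard against:
 *
 *	pass 1: flush_workqueue() runs a worker whose RCU callback, once
 *		the grace period expires, queues new work;
 *	pass 2: after rcu_barrier() that callback has run, so the flush
 *		picks up the newly queued work, which may rearm once more;
 *	pass 3: catches the final generation, after which drain_workqueue()
 *		flushes until the queue is genuinely empty.
 */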

struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags);

static inline struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
}
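
/*
 * Illustrative sketch (not from the original source): pin an object into
 * the GGTT with the default view and no ww context, use its GGTT address,
 * then drop the pin; flags, placement constraints and error handling are
 * elided.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... access the object through vma->node.start ...
 *	i915_vma_unpin(vma);
 */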

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  const struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return xa_load(&file_priv->context_xa, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
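
/*
 * Illustrative sketch (not from the original source): the lookup above
 * returns a context with its own reference taken under rcu_read_lock(),
 * so a caller pairs it with i915_gem_context_put() when done:
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 *	if (!ctx)
 *		return -ENOENT;
 *	... use ctx ...
 *	i915_gem_context_put(ctx);
 *
 * kref_get_unless_zero() is what makes the RCU lookup safe: a context
 * whose last reference has already been dropped is treated as not found.
 */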

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned long color,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    unsigned long batch_offset,
			    unsigned long batch_length,
			    struct i915_vma *shadow,
			    bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))

#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
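
/*
 * Illustrative sketch (not from the original source), expanding the comment
 * above into a complete critical section; REG stands in for whichever
 * register is being accessed:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *
 *	I915_WRITE_FW(REG, I915_READ_FW(REG) | BIT(0));
 *
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */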

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}

static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
{
	return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
				1000000000);
}

static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
{
	return div_u64(val * 1000000000,
		       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
}
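
/*
 * Worked example (illustrative, not from the original source): assuming a
 * 12.5 MHz command-streamer timestamp frequency,
 *
 *	i915_cs_timestamp_ns_to_ticks(i915, 1000)
 *		= DIV_ROUND_UP(1000 * 12500000, 1000000000) = 13 ticks
 *	i915_cs_timestamp_ticks_to_ns(i915, 13)
 *		= 13 * 1000000000 / 12500000 = 1040 ns
 *
 * ns_to_ticks() rounds up, so a round trip can overshoot by up to one
 * tick's worth of time.
 */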

#endif