Lines Matching full:hotplug

33  * DOC: Hotplug
35 * Simply put, hotplug occurs when a display is connected to or disconnected
39  * Hotplug in i915 is handled at many different levels of abstraction.
43 * handlers gather the hotplug detect (HPD) information from relevant registers
44 * into a platform independent mask of hotplug pins that have fired.
47 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
49 * regular hotplug).
53 * pulses, with failures and non-MST long pulses triggering regular hotplug
56 * The regular hotplug work function i915_hotplug_work_func() calls connector
57 * detect hooks, and, if connector status changes, triggers sending of hotplug
61 * the hotplug uevent, disabling or enabling the crtc as needed.
63 * The hotplug interrupt storm detection and mitigation code keeps track of the
64 * number of interrupts per hotplug pin over a period of time, and if the number
70 * The current implementation expects that a hotplug interrupt storm will not be
126 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
131 * and should only be adjusted for automated hotplug testing.
138 unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; in intel_hpd_irq_storm_detect()
140 const int threshold = dev_priv->hotplug.hpd_storm_threshold; in intel_hpd_irq_storm_detect()
144 dev_priv->hotplug.stats[pin].last_jiffies = jiffies; in intel_hpd_irq_storm_detect()
145 dev_priv->hotplug.stats[pin].count = 0; in intel_hpd_irq_storm_detect()
147 } else if (dev_priv->hotplug.stats[pin].count > threshold && in intel_hpd_irq_storm_detect()
149 dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; in intel_hpd_irq_storm_detect()
153 dev_priv->hotplug.stats[pin].count++; in intel_hpd_irq_storm_detect()
155 dev_priv->hotplug.stats[pin].count); in intel_hpd_irq_storm_detect()
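
The matched lines above sketch the storm accounting: each pin keeps a count and the jiffies value of its current detection window, a hit outside the window restarts the count, and exceeding hpd_storm_threshold inside the window marks the pin HPD_MARK_DISABLED. Below is a minimal self-contained model of that logic, not the driver's actual code: plain time_t stands in for jiffies, and the period, threshold, and names are invented for illustration.

#include <stdbool.h>
#include <time.h>

/* Illustrative stand-ins; the driver measures the window in jiffies and
 * keeps the threshold in dev_priv->hotplug.hpd_storm_threshold. */
#define STORM_PERIOD_SEC  1
#define STORM_THRESHOLD   5

enum pin_state { PIN_ENABLED, PIN_MARK_DISABLED, PIN_DISABLED };

struct pin_stats {
	time_t last_event;	/* start of the current detection window */
	int count;		/* interrupts seen inside that window */
	enum pin_state state;
};

/* Returns true when this event is judged to be part of a storm. */
static bool hpd_storm_detect(struct pin_stats *p, time_t now)
{
	if (now - p->last_event > STORM_PERIOD_SEC) {
		/* Window expired: restart the count for this pin. */
		p->last_event = now;
		p->count = 0;
	} else if (p->count > STORM_THRESHOLD) {
		/* Too many interrupts inside one window: mark the pin so the
		 * caller can switch its connectors over to polling. */
		p->state = PIN_MARK_DISABLED;
		return true;
	} else {
		p->count++;
	}
	return false;
}
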
185 dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) in intel_hpd_irq_storm_disable()
189 "switching from hotplug detection to polling\n", in intel_hpd_irq_storm_disable()
192 dev_priv->hotplug.stats[pin].state = HPD_DISABLED; in intel_hpd_irq_storm_disable()
199 /* Enable polling and queue hotplug re-enabling. */ in intel_hpd_irq_storm_disable()
202 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, in intel_hpd_irq_storm_disable()
211 hotplug.reenable_work.work); in intel_hpd_irq_storm_reenable_work()
222 if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED) in intel_hpd_irq_storm_reenable_work()
225 dev_priv->hotplug.stats[pin].state = HPD_ENABLED; in intel_hpd_irq_storm_reenable_work()
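
intel_hpd_irq_storm_disable() and intel_hpd_irq_storm_reenable_work() are the two halves of the mitigation: a stormy pin's connectors are switched to polling, and a delayed work item later re-arms interrupt-driven detection. A rough model follows; a plain timestamp stands in for mod_delayed_work() on reenable_work, the two-minute delay is illustrative, and the names are invented.

#include <stdbool.h>
#include <time.h>

enum pin_state { PIN_ENABLED, PIN_MARK_DISABLED, PIN_DISABLED };

#define REENABLE_DELAY_SEC (2 * 60)	/* illustrative only */

struct hpd_pin_model {
	enum pin_state state;
	bool polled;		/* stand-in for switching the connector to polling */
	time_t reenable_at;	/* stand-in for the delayed reenable_work */
};

/* Storm confirmed: stop trusting the interrupt and poll instead. */
static void hpd_storm_disable(struct hpd_pin_model *p, time_t now)
{
	if (p->state != PIN_MARK_DISABLED)
		return;
	/* "switching from hotplug detection to polling" */
	p->state = PIN_DISABLED;
	p->polled = true;
	p->reenable_at = now + REENABLE_DELAY_SEC;
}

/* Delayed re-enable: give interrupt-driven detection another chance. */
static void hpd_storm_reenable(struct hpd_pin_model *p, time_t now)
{
	if (p->state != PIN_DISABLED || now < p->reenable_at)
		return;
	p->state = PIN_ENABLED;
	p->polled = false;
}
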
284 container_of(work, struct drm_i915_private, hotplug.dig_port_work); in i915_digport_work_func()
290 long_port_mask = dev_priv->hotplug.long_port_mask; in i915_digport_work_func()
291 dev_priv->hotplug.long_port_mask = 0; in i915_digport_work_func()
292 short_port_mask = dev_priv->hotplug.short_port_mask; in i915_digport_work_func()
293 dev_priv->hotplug.short_port_mask = 0; in i915_digport_work_func()
322 dev_priv->hotplug.event_bits |= old_bits; in i915_digport_work_func()
324 schedule_work(&dev_priv->hotplug.hotplug_work); in i915_digport_work_func()
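
i915_digport_work_func() snapshots and clears the pending long/short port masks under the lock, processes the ports with the lock dropped, and feeds anything it could not handle back into event_bits for the regular hotplug work. A condensed illustration of that snapshot-and-clear pattern follows; the pthread mutex stands in for the driver's irq spinlock and the names are invented.

#include <pthread.h>
#include <stdint.h>

struct hotplug_model {
	pthread_mutex_t lock;		/* stand-in for the irq spinlock */
	uint32_t long_port_mask;
	uint32_t short_port_mask;
	uint32_t event_bits;		/* pins deferred to the regular hotplug work */
};

static void digport_work(struct hotplug_model *hp)
{
	uint32_t long_mask, short_mask, unhandled = 0;

	/* Snapshot and clear the pending work under the lock... */
	pthread_mutex_lock(&hp->lock);
	long_mask = hp->long_port_mask;
	hp->long_port_mask = 0;
	short_mask = hp->short_port_mask;
	hp->short_port_mask = 0;
	pthread_mutex_unlock(&hp->lock);

	/* ...then process each port with the lock dropped.  Ports whose
	 * pulse handling fails (or non-MST long pulses) would be collected
	 * into 'unhandled'; the DP-specific details are elided here. */
	(void)long_mask;
	(void)short_mask;

	/* Anything left over goes back to the regular hotplug path. */
	if (unhandled) {
		pthread_mutex_lock(&hp->lock);
		hp->event_bits |= unhandled;
		pthread_mutex_unlock(&hp->lock);
		/* schedule_work(&hotplug_work) in the real driver */
	}
}
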
329 * Handle hotplug events outside the interrupt handler proper.
334 container_of(work, struct drm_i915_private, hotplug.hotplug_work); in i915_hotplug_work_func()
344 DRM_DEBUG_KMS("running encoder hotplug functions\n"); in i915_hotplug_work_func()
348 hpd_event_bits = dev_priv->hotplug.event_bits; in i915_hotplug_work_func()
349 dev_priv->hotplug.event_bits = 0; in i915_hotplug_work_func()
351 /* Disable hotplug on connectors that hit an irq storm. */ in i915_hotplug_work_func()
363 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", in i915_hotplug_work_func()
366 changed |= intel_encoder->hotplug(intel_encoder, in i915_hotplug_work_func()
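
The regular work function boils down to: snapshot event_bits, run each affected encoder's ->hotplug() hook, and notify userspace only if some connector actually changed state. A stripped-down model of that loop is shown below; the hook signature, the uevent helper, and all names here are placeholders, not the driver's types.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct encoder_model {
	int pin;					/* HPD pin this encoder listens on */
	bool (*hotplug)(struct encoder_model *);	/* true if connector status changed */
};

static void notify_userspace(void)
{
	/* the real driver sends the hotplug uevent here */
}

static void hotplug_work(struct encoder_model *encoders, size_t n,
			 uint32_t *event_bits)
{
	uint32_t pending = *event_bits;	/* snapshotted under the lock in the real code */
	bool changed = false;

	*event_bits = 0;

	for (size_t i = 0; i < n; i++) {
		if (!(pending & (1u << encoders[i].pin)))
			continue;
		/* "Connector ... received hotplug event": run the per-encoder hook. */
		changed |= encoders[i].hotplug(&encoders[i]);
	}

	if (changed)
		notify_userspace();	/* userspace then triggers a modeset as needed */
}
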
379 * intel_hpd_irq_handler - main hotplug irq handler
384 * This is the main hotplug irq handler for all platforms. The platform specific
385 * irq handlers call the platform specific hotplug irq handlers, which read and
391 * Here, we do hotplug irq storm detection and mitigation, and pass further
435 dev_priv->hotplug.long_port_mask |= BIT(port); in intel_hpd_irq_handler()
438 dev_priv->hotplug.short_port_mask |= BIT(port); in intel_hpd_irq_handler()
449 if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { in intel_hpd_irq_handler()
453 * hotplug bits itself. So only WARN about unexpected in intel_hpd_irq_handler()
461 if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) in intel_hpd_irq_handler()
472 dev_priv->hotplug.event_bits |= BIT(pin); in intel_hpd_irq_handler()
481 dev_priv->hotplug.event_bits &= ~BIT(pin); in intel_hpd_irq_handler()
491 * Our hotplug handler can grab modeset locks (by calling down into the in intel_hpd_irq_handler()
497 queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); in intel_hpd_irq_handler()
499 schedule_work(&dev_priv->hotplug.hotplug_work); in intel_hpd_irq_handler()
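
intel_hpd_irq_handler() itself mostly sorts events: digital ports get their long or short pulse recorded in the port masks for the DP bottom half, other pins land in event_bits for the regular hotplug work, and pins knocked out by storm mitigation are filtered along the way. The sketch below is a rough, single-threaded approximation with invented names; flags replace queue_work()/schedule_work().

#include <stdbool.h>
#include <stdint.h>

struct hpd_event {
	int port;		/* digital port index, or -1 */
	int pin;		/* HPD pin */
	bool long_pulse;	/* long vs. short HPD pulse */
};

struct irq_model {
	uint32_t long_port_mask, short_port_mask, event_bits;
	bool pin_disabled[32];	/* pins turned off by storm mitigation */
	bool queue_dig, queue_hotplug;
};

static void hpd_irq_handler(struct irq_model *m, const struct hpd_event *ev, int n)
{
	for (int i = 0; i < n; i++) {
		if (ev[i].port >= 0) {
			/* Digital port: record the pulse type for the DP work. */
			if (ev[i].long_pulse)
				m->long_port_mask |= 1u << ev[i].port;
			else
				m->short_port_mask |= 1u << ev[i].port;
			m->queue_dig = true;
			continue;
		}
		if (m->pin_disabled[ev[i].pin])
			continue;	/* storm mitigation has disabled this pin */
		/* Storm detection runs here in the driver; on a storm the
		 * event bit is cleared again before the work is scheduled. */
		m->event_bits |= 1u << ev[i].pin;
		m->queue_hotplug = true;
	}
	/* The real handler then queues dig_port_work on dp_wq and schedules
	 * hotplug_work; the work functions may grab modeset locks, which is
	 * why none of that happens inside the irq handler itself. */
}
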
506 * This function enables the hotplug support. It requires that interrupts have
507 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
521 dev_priv->hotplug.stats[i].count = 0; in intel_hpd_init()
522 dev_priv->hotplug.stats[i].state = HPD_ENABLED; in intel_hpd_init()
525 WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); in intel_hpd_init()
526 schedule_work(&dev_priv->hotplug.poll_init_work); in intel_hpd_init()
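
intel_hpd_init() largely just resets the bookkeeping: every pin's counter is cleared and its state returned to HPD_ENABLED, poll_enabled is switched off, and the poll_init work is scheduled so the new poll state actually reaches the connectors. Schematically, with invented names, an illustrative pin count, and no locking shown:

#include <stdbool.h>

#define NUM_PINS 8	/* illustrative; the driver has its own enum of HPD pins */

enum pin_state { PIN_ENABLED, PIN_MARK_DISABLED, PIN_DISABLED };

struct hotplug_model {
	struct { int count; enum pin_state state; } stats[NUM_PINS];
	bool poll_enabled;
	bool poll_init_queued;	/* stand-in for schedule_work(&poll_init_work) */
};

static void hpd_init(struct hotplug_model *hp)
{
	/* Forget any earlier storm history and re-arm every pin. */
	for (int i = 0; i < NUM_PINS; i++) {
		hp->stats[i].count = 0;
		hp->stats[i].state = PIN_ENABLED;
	}

	/* Interrupt-driven detection is back, so polling goes off; the
	 * actual connector updates happen from the queued work item. */
	hp->poll_enabled = false;
	hp->poll_init_queued = true;
}
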
544 hotplug.poll_init_work); in i915_hpd_poll_init_work()
552 enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); in i915_hpd_poll_init_work()
593 * not they support hotplug detection. Under certain conditions HPD may not be
599 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
606 WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); in intel_hpd_poll_init()
614 schedule_work(&dev_priv->hotplug.poll_init_work); in intel_hpd_poll_init()
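
intel_hpd_poll_init() cannot update connectors directly, since the detect paths take dev->mode_config.mutex; it only flips poll_enabled (WRITE_ONCE here, paired with the READ_ONCE in the worker) and schedules the worker that does the real connector updates. A minimal model of that flag-plus-deferred-work handoff, using C11 atomics in place of WRITE_ONCE/READ_ONCE and invented names:

#include <stdatomic.h>
#include <stdbool.h>

struct poll_model {
	atomic_bool poll_enabled;	/* WRITE_ONCE/READ_ONCE in the driver */
	bool work_scheduled;		/* stand-in for schedule_work(&poll_init_work) */
};

/* Called from contexts that must not grab mode_config.mutex. */
static void hpd_poll_request(struct poll_model *m, bool enable)
{
	atomic_store(&m->poll_enabled, enable);
	m->work_scheduled = true;	/* defer the heavy lifting to the worker */
}

/* The worker runs in process context and may take the mutex. */
static void hpd_poll_init_worker(struct poll_model *m)
{
	bool enabled = atomic_load(&m->poll_enabled);

	/* Walk the connectors and enable or disable polling on each one;
	 * elided here, since it needs the real connector list and locking. */
	(void)enabled;
}
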
619 INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); in intel_hpd_init_work()
620 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); in intel_hpd_init_work()
621 INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); in intel_hpd_init_work()
622 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, in intel_hpd_init_work()
630 dev_priv->hotplug.long_port_mask = 0; in intel_hpd_cancel_work()
631 dev_priv->hotplug.short_port_mask = 0; in intel_hpd_cancel_work()
632 dev_priv->hotplug.event_bits = 0; in intel_hpd_cancel_work()
636 cancel_work_sync(&dev_priv->hotplug.dig_port_work); in intel_hpd_cancel_work()
637 cancel_work_sync(&dev_priv->hotplug.hotplug_work); in intel_hpd_cancel_work()
638 cancel_work_sync(&dev_priv->hotplug.poll_init_work); in intel_hpd_cancel_work()
639 cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); in intel_hpd_cancel_work()
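
Teardown in intel_hpd_cancel_work() follows a common ordering: clear the pending masks under the lock first so no new work has useful input, then synchronously cancel each work item so nothing is still running when the caller proceeds. A compact model of that ordering, with a pthread mutex standing in for the spinlock and an abstract placeholder for the cancel_work_sync() family:

#include <pthread.h>
#include <stdint.h>

struct hotplug_model {
	pthread_mutex_t lock;
	uint32_t long_port_mask, short_port_mask, event_bits;
};

/* Placeholder for cancel_work_sync(): wait until the named worker is
 * neither queued nor running.  Deliberately left abstract. */
static void wait_for_worker_idle(const char *name) { (void)name; }

static void hpd_cancel_work(struct hotplug_model *hp)
{
	/* First take away the workers' input... */
	pthread_mutex_lock(&hp->lock);
	hp->long_port_mask = 0;
	hp->short_port_mask = 0;
	hp->event_bits = 0;
	pthread_mutex_unlock(&hp->lock);

	/* ...then wait for each one to finish, mirroring the driver's
	 * cancel_work_sync()/cancel_delayed_work_sync() calls. */
	wait_for_worker_idle("dig_port_work");
	wait_for_worker_idle("hotplug_work");
	wait_for_worker_idle("poll_init_work");
	wait_for_worker_idle("reenable_work");
}
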
650 if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { in intel_hpd_disable()
651 dev_priv->hotplug.stats[pin].state = HPD_DISABLED; in intel_hpd_disable()
665 dev_priv->hotplug.stats[pin].state = HPD_ENABLED; in intel_hpd_enable()