|
|
|
@@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
|
|
|
|
|
return storm;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Check whether hotplug detection work is currently allowed to be
 * scheduled.
 *
 * The flag is serialized by i915->irq_lock, which the caller must hold.
 */
static bool detection_work_enabled(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->irq_lock);

	return i915->display.hotplug.detection_work_enabled;
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
|
|
|
|
|
{
|
|
|
|
|
lockdep_assert_held(&i915->irq_lock);
|
|
|
|
|
|
|
|
|
|
if (!detection_work_enabled(i915))
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
return mod_delayed_work(i915->unordered_wq, work, delay);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
|
|
|
|
|
{
|
|
|
|
|
lockdep_assert_held(&i915->irq_lock);
|
|
|
|
|
|
|
|
|
|
if (!detection_work_enabled(i915))
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
return queue_delayed_work(i915->unordered_wq, work, delay);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
|
|
|
|
|
{
|
|
|
|
|
lockdep_assert_held(&i915->irq_lock);
|
|
|
|
|
|
|
|
|
|
if (!detection_work_enabled(i915))
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
return queue_work(i915->unordered_wq, work);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
|
|
|
|
|
{
|
|
|
|
@@ -213,9 +253,9 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
|
|
|
|
|
/* Enable polling and queue hotplug re-enabling. */
|
|
|
|
|
if (hpd_disabled) {
|
|
|
|
|
drm_kms_helper_poll_reschedule(&dev_priv->drm);
|
|
|
|
|
mod_delayed_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.reenable_work,
|
|
|
|
|
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
|
|
|
|
|
mod_delayed_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.reenable_work,
|
|
|
|
|
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work)
|
|
|
|
|
if (old_bits) {
|
|
|
|
|
spin_lock_irq(&dev_priv->irq_lock);
|
|
|
|
|
dev_priv->display.hotplug.event_bits |= old_bits;
|
|
|
|
|
queue_delayed_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work, 0);
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
queue_delayed_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work, 0);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
|
|
|
|
|
if (retry) {
|
|
|
|
|
spin_lock_irq(&dev_priv->irq_lock);
|
|
|
|
|
dev_priv->display.hotplug.retry_bits |= retry;
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
|
|
|
|
|
mod_delayed_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work,
|
|
|
|
|
msecs_to_jiffies(HPD_RETRY_DELAY));
|
|
|
|
|
mod_delayed_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work,
|
|
|
|
|
msecs_to_jiffies(HPD_RETRY_DELAY));
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
|
|
|
|
|
*/
|
|
|
|
|
if (storm_detected)
|
|
|
|
|
intel_hpd_irq_setup(dev_priv);
|
|
|
|
|
spin_unlock(&dev_priv->irq_lock);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Our hotplug handler can grab modeset locks (by calling down into the
|
|
|
|
@@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
|
|
|
|
|
if (queue_dig)
|
|
|
|
|
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
|
|
|
|
|
if (queue_hp)
|
|
|
|
|
queue_delayed_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work, 0);
|
|
|
|
|
queue_delayed_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.hotplug_work, 0);
|
|
|
|
|
|
|
|
|
|
spin_unlock(&dev_priv->irq_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@@ -781,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
|
|
|
|
|
* As well, there's no issue if we race here since we always reschedule
|
|
|
|
|
* this worker anyway
|
|
|
|
|
*/
|
|
|
|
|
queue_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.poll_init_work);
|
|
|
|
|
spin_lock_irq(&dev_priv->irq_lock);
|
|
|
|
|
queue_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.poll_init_work);
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
@@ -810,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
|
|
|
|
|
queue_work(dev_priv->unordered_wq,
|
|
|
|
|
&dev_priv->display.hotplug.poll_init_work);
|
|
|
|
|
|
|
|
|
|
spin_lock_irq(&dev_priv->irq_lock);
|
|
|
|
|
queue_detection_work(dev_priv,
|
|
|
|
|
&dev_priv->display.hotplug.poll_init_work);
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void intel_hpd_init_early(struct drm_i915_private *i915)
|
|
|
|
@@ -833,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915)
|
|
|
|
|
i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool cancel_all_detection_work(struct drm_i915_private *i915)
|
|
|
|
|
{
|
|
|
|
|
bool was_pending = false;
|
|
|
|
|
|
|
|
|
|
if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
|
|
|
|
|
was_pending = true;
|
|
|
|
|
if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
|
|
|
|
|
was_pending = true;
|
|
|
|
|
if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
|
|
|
|
|
was_pending = true;
|
|
|
|
|
|
|
|
|
|
return was_pending;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
|
|
|
|
|
{
|
|
|
|
|
if (!HAS_DISPLAY(dev_priv))
|
|
|
|
@@ -848,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
|
|
|
|
|
cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
|
|
|
|
|
cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
|
|
|
|
|
cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
|
|
|
|
|
cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* All other work triggered by hotplug events should be canceled by
|
|
|
|
|
* now.
|
|
|
|
|
*/
|
|
|
|
|
if (cancel_all_detection_work(dev_priv))
|
|
|
|
|
drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
|
|
|
|
@@ -880,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
|
|
|
|
|
spin_unlock_irq(&dev_priv->irq_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
|
|
|
|
|
{
|
|
|
|
|
bool queue_work = false;
|
|
|
|
|
enum hpd_pin pin;
|
|
|
|
|
|
|
|
|
|
lockdep_assert_held(&i915->irq_lock);
|
|
|
|
|
|
|
|
|
|
if (i915->display.hotplug.event_bits ||
|
|
|
|
|
i915->display.hotplug.retry_bits)
|
|
|
|
|
queue_work = true;
|
|
|
|
|
|
|
|
|
|
for_each_hpd_pin(pin) {
|
|
|
|
|
switch (i915->display.hotplug.stats[pin].state) {
|
|
|
|
|
case HPD_MARK_DISABLED:
|
|
|
|
|
queue_work = true;
|
|
|
|
|
break;
|
|
|
|
|
case HPD_ENABLED:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
MISSING_CASE(i915->display.hotplug.stats[pin].state);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (queue_work)
|
|
|
|
|
queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * intel_hpd_enable_detection_work - Enable hotplug detection work
 * @i915: i915 device instance
 *
 * Set the flag allowing hotplug detection work to be scheduled, and
 * queue work for any hotplug IRQ events that were noted while the
 * detection work was disabled. Both steps are done under irq_lock so
 * no event can slip in between them.
 */
void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.detection_work_enabled = true;
	queue_work_for_missed_irqs(i915);
	spin_unlock_irq(&i915->irq_lock);
}
|
|
|
|
|
|
|
|
|
|
/**
 * intel_hpd_disable_detection_work - Disable hotplug detection work
 * @i915: i915 device instance
 *
 * Clear the enable flag under irq_lock, so no new detection work can be
 * scheduled, then synchronously cancel any detection work already
 * scheduled. The cancel is done outside the lock since the *_sync
 * cancels may sleep.
 */
void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.detection_work_enabled = false;
	spin_unlock_irq(&i915->irq_lock);

	cancel_all_detection_work(i915);
}
|
|
|
|
|
|
|
|
|
|
bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&i915->irq_lock, flags);
|
|
|
|
|
ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
|
|
|
|
|
spin_unlock_irqrestore(&i915->irq_lock, flags);
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *dev_priv = m->private;
|
|
|
|
|