LCOV - code coverage report
Current view: top level - dev/pci/drm/i915 - i915_irq.c (source / functions) Hit Total Coverage
Test: 6.4 Lines: 0 2064 0.0 %
Date: 2018-10-19 03:25:38 Functions: 0 152 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
       2             :  */
       3             : /*
       4             :  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
       5             :  * All Rights Reserved.
       6             :  *
       7             :  * Permission is hereby granted, free of charge, to any person obtaining a
       8             :  * copy of this software and associated documentation files (the
       9             :  * "Software"), to deal in the Software without restriction, including
      10             :  * without limitation the rights to use, copy, modify, merge, publish,
      11             :  * distribute, sub license, and/or sell copies of the Software, and to
      12             :  * permit persons to whom the Software is furnished to do so, subject to
      13             :  * the following conditions:
      14             :  *
      15             :  * The above copyright notice and this permission notice (including the
      16             :  * next paragraph) shall be included in all copies or substantial portions
      17             :  * of the Software.
      18             :  *
      19             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      20             :  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      21             :  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
      22             :  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
      23             :  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
      24             :  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
      25             :  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      26             :  *
      27             :  */
      28             : 
      29             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      30             : 
      31             : #ifdef __linux__
      32             : #include <linux/sysrq.h>
      33             : #include <linux/slab.h>
      34             : #include <linux/circ_buf.h>
      35             : #endif
      36             : #include <dev/pci/drm/drmP.h>
      37             : #include <dev/pci/drm/i915_drm.h>
      38             : #include "i915_drv.h"
      39             : #include "i915_trace.h"
      40             : #include "intel_drv.h"
      41             : 
      42             : /**
      43             :  * DOC: interrupt handling
      44             :  *
      45             :  * These functions provide the basic support for enabling and disabling the
      46             :  * interrupt handling support. There's a lot more functionality in i915_irq.c
      47             :  * and related files, but that will be described in separate chapters.
      48             :  */
      49             : 
/*
 * Hotplug-detect (HPD) pin -> interrupt-register bit lookup tables.
 * Each table is indexed by enum hpd_pin and matches one platform's
 * hotplug register layout; pins left out default to 0 (unused).
 */

/* ILK north display: only DP port A raises a CPU-side hotplug bit. */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

/* IVB uses a different DE bit for the DP A hotplug. */
static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

/* BDW: DP A hotplug moved into the GEN8 DE port register. */
static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

/* IBX (Ibex Peak) PCH south-display hotplug bits. */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

/* CPT (CougarPoint) PCH: same pins as IBX, different bit layout. */
static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

/* SPT (SunrisePoint) PCH: adds ports A and E on top of the CPT bits. */
static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

/* Pre-PCH platforms: hotplug *enable* bits (PORT_HOTPLUG_EN layout). */
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

/* G4x hotplug *status* bits (G4X SDVO status encoding). */
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* Pre-G4x hotplug status bits (older SDVO status encoding). */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
     119             : 
/* IIR can theoretically queue up two events. Be paranoid. */
/*
 * Reset one instance ("which") of a banked GEN8+ interrupt register
 * triple: mask every source (IMR), disable delivery (IER), then clear
 * the identity register (IIR) twice, since a second event may have
 * been latched behind the first.  POSTING_READs flush each write.
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

/* Same reset sequence for a single (non-banked) IMR/IER/IIR triple. */
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
     140             : 
     141             : /*
     142             :  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
     143             :  */
     144           0 : static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
     145             : {
     146           0 :         u32 val = I915_READ(reg);
     147             : 
     148           0 :         if (val == 0)
     149           0 :                 return;
     150             : 
     151           0 :         WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
     152             :              reg, val);
     153           0 :         I915_WRITE(reg, 0xffffffff);
     154           0 :         POSTING_READ(reg);
     155           0 :         I915_WRITE(reg, 0xffffffff);
     156           0 :         POSTING_READ(reg);
     157           0 : }
     158             : 
/*
 * Program one instance ("which") of a banked GEN8+ interrupt register
 * triple.  IIR is expected to be clear already (warns if not); IER is
 * enabled before IMR is unmasked, and the final POSTING_READ flushes
 * the sequence.
 */
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

/* Same init sequence for a single (non-banked) IMR/IER/IIR triple. */
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
     172             : 
     173             : static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
     174             : 
     175             : /* For display hotplug interrupt */
     176             : static inline void
     177           0 : i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
     178             :                                      uint32_t mask,
     179             :                                      uint32_t bits)
     180             : {
     181             :         uint32_t val;
     182             : 
     183           0 :         assert_spin_locked(&dev_priv->irq_lock);
     184           0 :         WARN_ON(bits & ~mask);
     185             : 
     186           0 :         val = I915_READ(PORT_HOTPLUG_EN);
     187           0 :         val &= ~mask;
     188           0 :         val |= bits;
     189           0 :         I915_WRITE(PORT_HOTPLUG_EN, val);
     190           0 : }
     191             : 
     192             : /**
     193             :  * i915_hotplug_interrupt_update - update hotplug interrupt enable
     194             :  * @dev_priv: driver private
     195             :  * @mask: bits to update
     196             :  * @bits: bits to enable
     197             :  * NOTE: the HPD enable bits are modified both inside and outside
     198             :  * of an interrupt context. To avoid that read-modify-write cycles
     199             :  * interfer, these bits are protected by a spinlock. Since this
     200             :  * function is usually not called from a context where the lock is
     201             :  * held already, this function acquires the lock itself. A non-locking
     202             :  * version is also available.
     203             :  */
     204           0 : void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
     205             :                                    uint32_t mask,
     206             :                                    uint32_t bits)
     207             : {
     208           0 :         spin_lock_irq(&dev_priv->irq_lock);
     209           0 :         i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
     210           0 :         spin_unlock_irq(&dev_priv->irq_lock);
     211           0 : }
     212             : 
     213             : /**
     214             :  * ilk_update_display_irq - update DEIMR
     215             :  * @dev_priv: driver private
     216             :  * @interrupt_mask: mask of interrupt bits to update
     217             :  * @enabled_irq_mask: mask of interrupt bits to enable
     218             :  */
     219           0 : static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
     220             :                                    uint32_t interrupt_mask,
     221             :                                    uint32_t enabled_irq_mask)
     222             : {
     223             :         uint32_t new_val;
     224             : 
     225           0 :         assert_spin_locked(&dev_priv->irq_lock);
     226             : 
     227           0 :         WARN_ON(enabled_irq_mask & ~interrupt_mask);
     228             : 
     229           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     230           0 :                 return;
     231             : 
     232           0 :         new_val = dev_priv->irq_mask;
     233           0 :         new_val &= ~interrupt_mask;
     234           0 :         new_val |= (~enabled_irq_mask & interrupt_mask);
     235             : 
     236           0 :         if (new_val != dev_priv->irq_mask) {
     237           0 :                 dev_priv->irq_mask = new_val;
     238           0 :                 I915_WRITE(DEIMR, dev_priv->irq_mask);
     239           0 :                 POSTING_READ(DEIMR);
     240           0 :         }
     241           0 : }
     242             : 
     243             : void
     244           0 : ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
     245             : {
     246           0 :         ilk_update_display_irq(dev_priv, mask, mask);
     247           0 : }
     248             : 
     249             : void
     250           0 : ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
     251             : {
     252           0 :         ilk_update_display_irq(dev_priv, mask, 0);
     253           0 : }
     254             : 
     255             : /**
     256             :  * ilk_update_gt_irq - update GTIMR
     257             :  * @dev_priv: driver private
     258             :  * @interrupt_mask: mask of interrupt bits to update
     259             :  * @enabled_irq_mask: mask of interrupt bits to enable
     260             :  */
     261           0 : static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
     262             :                               uint32_t interrupt_mask,
     263             :                               uint32_t enabled_irq_mask)
     264             : {
     265           0 :         assert_spin_locked(&dev_priv->irq_lock);
     266             : 
     267           0 :         WARN_ON(enabled_irq_mask & ~interrupt_mask);
     268             : 
     269           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     270             :                 return;
     271             : 
     272           0 :         dev_priv->gt_irq_mask &= ~interrupt_mask;
     273           0 :         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
     274           0 :         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
     275           0 :         POSTING_READ(GTIMR);
     276           0 : }
     277             : 
     278           0 : void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
     279             : {
     280           0 :         ilk_update_gt_irq(dev_priv, mask, mask);
     281           0 : }
     282             : 
     283           0 : void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
     284             : {
     285           0 :         ilk_update_gt_irq(dev_priv, mask, 0);
     286           0 : }
     287             : 
     288           0 : static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
     289             : {
     290           0 :         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
     291             : }
     292             : 
     293           0 : static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
     294             : {
     295           0 :         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
     296             : }
     297             : 
     298           0 : static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
     299             : {
     300           0 :         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
     301             : }
     302             : 
     303             : /**
     304             :   * snb_update_pm_irq - update GEN6_PMIMR
     305             :   * @dev_priv: driver private
     306             :   * @interrupt_mask: mask of interrupt bits to update
     307             :   * @enabled_irq_mask: mask of interrupt bits to enable
     308             :   */
     309           0 : static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
     310             :                               uint32_t interrupt_mask,
     311             :                               uint32_t enabled_irq_mask)
     312             : {
     313             :         uint32_t new_val;
     314             : 
     315           0 :         WARN_ON(enabled_irq_mask & ~interrupt_mask);
     316             : 
     317           0 :         assert_spin_locked(&dev_priv->irq_lock);
     318             : 
     319           0 :         new_val = dev_priv->pm_irq_mask;
     320           0 :         new_val &= ~interrupt_mask;
     321           0 :         new_val |= (~enabled_irq_mask & interrupt_mask);
     322             : 
     323           0 :         if (new_val != dev_priv->pm_irq_mask) {
     324           0 :                 dev_priv->pm_irq_mask = new_val;
     325           0 :                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
     326           0 :                 POSTING_READ(gen6_pm_imr(dev_priv));
     327           0 :         }
     328           0 : }
     329             : 
     330           0 : void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
     331             : {
     332           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     333             :                 return;
     334             : 
     335           0 :         snb_update_pm_irq(dev_priv, mask, mask);
     336           0 : }
     337             : 
     338           0 : static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
     339             :                                   uint32_t mask)
     340             : {
     341           0 :         snb_update_pm_irq(dev_priv, mask, 0);
     342           0 : }
     343             : 
     344           0 : void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
     345             : {
     346           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     347             :                 return;
     348             : 
     349           0 :         __gen6_disable_pm_irq(dev_priv, mask);
     350           0 : }
     351             : 
/*
 * Clear all pending RPS interrupt events and forget any accumulated
 * but unprocessed state.  Caller need not hold irq_lock; taken here.
 */
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	/* Write the ack twice: IIR can latch a second event behind the
	 * first, so a single clear may leave one event pending. */
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	/* Drop any events we had recorded but not yet processed. */
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
     364             : 
/*
 * Enable RPS (GT frequency) interrupt delivery.  Expects a clean
 * slate: no accumulated software state and no stale hardware events
 * (both WARNed about).  Takes irq_lock itself.
 */
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	/* No leftover software or latched hardware events expected. */
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	/* Enable delivery in IER first, then unmask via the PM IMR helper. */
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
     380             : 
     381           0 : u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
     382             : {
     383             :         /*
     384             :          * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
     385             :          * if GEN6_PM_UP_EI_EXPIRED is masked.
     386             :          *
     387             :          * TODO: verify if this can be reproduced on VLV,CHV.
     388             :          */
     389           0 :         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
     390           0 :                 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
     391             : 
     392           0 :         if (INTEL_INFO(dev_priv)->gen >= 8)
     393           0 :                 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
     394             : 
     395           0 :         return mask;
     396             : }
     397             : 
/*
 * Tear down RPS interrupt delivery.  Ordering matters throughout:
 * the enabled flag is cleared first so the interrupt handler stops
 * queueing new work, then in-flight work is flushed, then the
 * hardware is masked.
 */
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Stop the handler from scheduling more rps work... */
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* ...then wait out any work already queued.  The lock is
	 * dropped here because cancel_work_sync() can sleep. */
	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	/* Mask every RPS source the platform allows masking. */
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	/* Mask in IMR and disable delivery in IER. */
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure a handler running on another CPU has finished. */
	synchronize_irq(dev->irq);
}
     420             : 
     421             : /**
     422             :   * bdw_update_port_irq - update DE port interrupt
     423             :   * @dev_priv: driver private
     424             :   * @interrupt_mask: mask of interrupt bits to update
     425             :   * @enabled_irq_mask: mask of interrupt bits to enable
     426             :   */
     427           0 : static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
     428             :                                 uint32_t interrupt_mask,
     429             :                                 uint32_t enabled_irq_mask)
     430             : {
     431             :         uint32_t new_val;
     432             :         uint32_t old_val;
     433             : 
     434           0 :         assert_spin_locked(&dev_priv->irq_lock);
     435             : 
     436           0 :         WARN_ON(enabled_irq_mask & ~interrupt_mask);
     437             : 
     438           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     439           0 :                 return;
     440             : 
     441           0 :         old_val = I915_READ(GEN8_DE_PORT_IMR);
     442             : 
     443             :         new_val = old_val;
     444           0 :         new_val &= ~interrupt_mask;
     445           0 :         new_val |= (~enabled_irq_mask & interrupt_mask);
     446             : 
     447           0 :         if (new_val != old_val) {
     448           0 :                 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
     449           0 :                 POSTING_READ(GEN8_DE_PORT_IMR);
     450           0 :         }
     451           0 : }
     452             : 
     453             : /**
     454             :  * ibx_display_interrupt_update - update SDEIMR
     455             :  * @dev_priv: driver private
     456             :  * @interrupt_mask: mask of interrupt bits to update
     457             :  * @enabled_irq_mask: mask of interrupt bits to enable
     458             :  */
     459           0 : void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
     460             :                                   uint32_t interrupt_mask,
     461             :                                   uint32_t enabled_irq_mask)
     462             : {
     463           0 :         uint32_t sdeimr = I915_READ(SDEIMR);
     464           0 :         sdeimr &= ~interrupt_mask;
     465           0 :         sdeimr |= (~enabled_irq_mask & interrupt_mask);
     466             : 
     467           0 :         WARN_ON(enabled_irq_mask & ~interrupt_mask);
     468             : 
     469           0 :         assert_spin_locked(&dev_priv->irq_lock);
     470             : 
     471           0 :         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
     472           0 :                 return;
     473             : 
     474           0 :         I915_WRITE(SDEIMR, sdeimr);
     475           0 :         POSTING_READ(SDEIMR);
     476           0 : }
     477             : 
/*
 * Enable PIPESTAT interrupt sources for @pipe.
 *
 * @enable_mask: bits in the enable (upper) half of PIPESTAT;
 * @status_mask: the corresponding status (lower) half bits.
 * Caller must hold dev_priv->irq_lock with interrupts enabled.
 */
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        /* Keep only the enable half; status bits read as latched events. */
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        /* Reject bits outside the architected enable/status fields. */
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        /* Nothing to do if all requested sources are already enabled. */
        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}
     504             : 
/*
 * Disable PIPESTAT interrupt sources for @pipe.  Mirror image of
 * __i915_enable_pipestat(); same locking requirements.
 */
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        /* Keep only the enable half; status bits read as latched events. */
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        /* Reject bits outside the architected enable/status fields. */
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        /* Nothing to do if none of the requested sources are enabled. */
        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}
     530             : 
     531           0 : static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
     532             : {
     533           0 :         u32 enable_mask = status_mask << 16;
     534             : 
     535             :         /*
     536             :          * On pipe A we don't support the PSR interrupt yet,
     537             :          * on pipe B and C the same bit MBZ.
     538             :          */
     539           0 :         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
     540           0 :                 return 0;
     541             :         /*
     542             :          * On pipe B and C we don't support the PSR interrupt yet, on pipe
     543             :          * A the same bit is for perf counters which we don't use either.
     544             :          */
     545           0 :         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
     546           0 :                 return 0;
     547             : 
     548           0 :         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
     549             :                          SPRITE0_FLIP_DONE_INT_EN_VLV |
     550             :                          SPRITE1_FLIP_DONE_INT_EN_VLV);
     551           0 :         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
     552           0 :                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
     553           0 :         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
     554           0 :                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
     555             : 
     556           0 :         return enable_mask;
     557           0 : }
     558             : 
     559             : void
     560           0 : i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
     561             :                      u32 status_mask)
     562             : {
     563             :         u32 enable_mask;
     564             : 
     565           0 :         if (IS_VALLEYVIEW(dev_priv->dev))
     566           0 :                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
     567             :                                                            status_mask);
     568             :         else
     569           0 :                 enable_mask = status_mask << 16;
     570           0 :         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
     571           0 : }
     572             : 
     573             : void
     574           0 : i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
     575             :                       u32 status_mask)
     576             : {
     577             :         u32 enable_mask;
     578             : 
     579           0 :         if (IS_VALLEYVIEW(dev_priv->dev))
     580           0 :                 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
     581             :                                                            status_mask);
     582             :         else
     583           0 :                 enable_mask = status_mask << 16;
     584           0 :         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
     585           0 : }
     586             : 
     587             : /**
     588             :  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
     589             :  * @dev: drm device
     590             :  */
     591           0 : static void i915_enable_asle_pipestat(struct drm_device *dev)
     592             : {
     593           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     594             : 
     595           0 :         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
     596           0 :                 return;
     597             : 
     598           0 :         spin_lock_irq(&dev_priv->irq_lock);
     599             : 
     600           0 :         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
     601           0 :         if (INTEL_INFO(dev)->gen >= 4)
     602           0 :                 i915_enable_pipestat(dev_priv, PIPE_A,
     603             :                                      PIPE_LEGACY_BLC_EVENT_STATUS);
     604             : 
     605           0 :         spin_unlock_irq(&dev_priv->irq_lock);
     606           0 : }
     607             : 
     608             : /*
     609             :  * This timing diagram depicts the video signal in and
     610             :  * around the vertical blanking period.
     611             :  *
     612             :  * Assumptions about the fictitious mode used in this example:
     613             :  *  vblank_start >= 3
     614             :  *  vsync_start = vblank_start + 1
     615             :  *  vsync_end = vblank_start + 2
     616             :  *  vtotal = vblank_start + 3
     617             :  *
     618             :  *           start of vblank:
     619             :  *           latch double buffered registers
     620             :  *           increment frame counter (ctg+)
     621             :  *           generate start of vblank interrupt (gen4+)
     622             :  *           |
     623             :  *           |          frame start:
     624             :  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
     625             :  *           |          may be shifted forward 1-3 extra lines via PIPECONF
     626             :  *           |          |
     627             :  *           |          |  start of vsync:
     628             :  *           |          |  generate vsync interrupt
     629             :  *           |          |  |
     630             :  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
     631             :  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
     632             :  * ----va---> <-----------------vb--------------------> <--------va-------------
     633             :  *       |          |       <----vs----->                     |
     634             :  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
     635             :  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
     636             :  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
     637             :  *       |          |                                         |
     638             :  *       last visible pixel                                   first visible pixel
     639             :  *                  |                                         increment frame counter (gen3/4)
     640             :  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
     641             :  *
     642             :  * x  = horizontal active
     643             :  * _  = horizontal blanking
     644             :  * hs = horizontal sync
     645             :  * va = vertical active
     646             :  * vb = vertical blanking
     647             :  * vs = vertical sync
     648             :  * vbs = vblank_start (number)
     649             :  *
     650             :  * Summary:
     651             :  * - most events happen at the start of horizontal sync
     652             :  * - frame start happens at the start of horizontal blank, 1-4 lines
     653             :  *   (depending on PIPECONF settings) after the start of vblank
     654             :  * - gen3/4 pixel and frame counter are synchronized with the start
     655             :  *   of horizontal active on the first line of vertical active
     656             :  */
     657             : 
     658           0 : static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
     659             : {
     660             :         /* Gen2 doesn't have a hardware frame counter */
     661           0 :         return 0;
     662             : }
     663             : 
     664             : /* Called from drm generic code, passed a 'crtc', which
     665             :  * we use as a pipe index
     666             :  */
     667           0 : static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
     668             : {
     669           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     670             :         unsigned long high_frame;
     671             :         unsigned long low_frame;
     672             :         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
     673             :         struct intel_crtc *intel_crtc =
     674           0 :                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
     675           0 :         const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
     676             : 
     677           0 :         htotal = mode->crtc_htotal;
     678           0 :         hsync_start = mode->crtc_hsync_start;
     679           0 :         vbl_start = mode->crtc_vblank_start;
     680           0 :         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
     681           0 :                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
     682             : 
     683             :         /* Convert to pixel count */
     684           0 :         vbl_start *= htotal;
     685             : 
     686             :         /* Start of vblank event occurs at start of hsync */
     687           0 :         vbl_start -= htotal - hsync_start;
     688             : 
     689           0 :         high_frame = PIPEFRAME(pipe);
     690           0 :         low_frame = PIPEFRAMEPIXEL(pipe);
     691             : 
     692             :         /*
     693             :          * High & low register fields aren't synchronized, so make sure
     694             :          * we get a low value that's stable across two reads of the high
     695             :          * register.
     696             :          */
     697           0 :         do {
     698           0 :                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
     699           0 :                 low   = I915_READ(low_frame);
     700           0 :                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
     701           0 :         } while (high1 != high2);
     702             : 
     703             :         high1 >>= PIPE_FRAME_HIGH_SHIFT;
     704           0 :         pixel = low & PIPE_PIXEL_MASK;
     705           0 :         low >>= PIPE_FRAME_LOW_SHIFT;
     706             : 
     707             :         /*
     708             :          * The frame counter increments at beginning of active.
     709             :          * Cook up a vblank counter by also checking the pixel
     710             :          * counter against vblank start.
     711             :          */
     712           0 :         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
     713             : }
     714             : 
     715           0 : static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
     716             : {
     717           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     718             : 
     719           0 :         return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
     720             : }
     721             : 
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#ifdef __linux__
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#else
/* OpenBSD: MMIO goes through bus_space(9) handles rather than a mapped pointer. */
#define __raw_i915_read32(dev_priv__, reg__) bus_space_read_4((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__))
#endif
     728             : 
/*
 * Read the current scanline of @crtc from the PIPEDSL register and
 * normalize it so that 0 corresponds to the start of vertical active
 * (via crtc->scanline_offset, see update_scanline_offset()).
 *
 * Uses only raw register reads; both callers in this file hold
 * dev_priv->uncore.lock around it.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        /* Interlaced modes scan half the lines per field. */
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        /* Gen2 has a narrower scanline field in PIPEDSL. */
        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev) && !position) {
                int i, temp;

                /* Poll for up to ~100us waiting for a non-zero reading. */
                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
                                DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}
     778             : 
/*
 * Query the current scanout position of @pipe, optionally bracketed by
 * CPU timestamps (*stime before, *etime after the register reads).
 *
 * *vpos/*hpos are reported relative to the end of vblank: negative while
 * inside vblank (counting up towards 0 at vbl_end), positive in the
 * active region (counting up since vbl_end) — see the comment near the
 * bottom of the function.
 *
 * Returns a DRM_SCANOUTPOS_* flag mask (VALID|ACCURATE on success, plus
 * IN_VBLANK when inside vblank), or 0 if the pipe has no pixel clock.
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        /* A zero crtc_clock means the pipe is disabled. */
        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        /* Interlaced modes scan half the lines per field. */
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /* Determine vblank membership before rebasing the position. */
        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        /* Scanline-based platforms only yield a vertical position. */
        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}
     902             : 
     903           0 : int intel_get_crtc_scanline(struct intel_crtc *crtc)
     904             : {
     905           0 :         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
     906             :         unsigned long irqflags;
     907             :         int position;
     908             : 
     909           0 :         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
     910           0 :         position = __intel_get_crtc_scanline(crtc);
     911           0 :         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
     912             : 
     913           0 :         return position;
     914             : }
     915             : 
     916           0 : static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
     917             :                               int *max_error,
     918             :                               struct timeval *vblank_time,
     919             :                               unsigned flags)
     920             : {
     921             :         struct drm_crtc *crtc;
     922             : 
     923           0 :         if (pipe >= INTEL_INFO(dev)->num_pipes) {
     924           0 :                 DRM_ERROR("Invalid crtc %u\n", pipe);
     925           0 :                 return -EINVAL;
     926             :         }
     927             : 
     928             :         /* Get drm_crtc to timestamp: */
     929           0 :         crtc = intel_get_crtc_for_pipe(dev, pipe);
     930           0 :         if (crtc == NULL) {
     931           0 :                 DRM_ERROR("Invalid crtc %u\n", pipe);
     932           0 :                 return -EINVAL;
     933             :         }
     934             : 
     935           0 :         if (!crtc->hwmode.crtc_clock) {
     936             :                 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
     937           0 :                 return -EBUSY;
     938             :         }
     939             : 
     940             :         /* Helper routine in DRM core does all the work: */
     941           0 :         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
     942             :                                                      vblank_time, flags,
     943             :                                                      &crtc->hwmode);
     944           0 : }
     945             : 
     946           0 : static void ironlake_rps_change_irq_handler(struct drm_device *dev)
     947             : {
     948           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     949             :         u32 busy_up, busy_down, max_avg, min_avg;
     950             :         u8 new_delay;
     951             : 
     952           0 :         spin_lock(&mchdev_lock);
     953             : 
     954           0 :         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
     955             : 
     956           0 :         new_delay = dev_priv->ips.cur_delay;
     957             : 
     958           0 :         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
     959           0 :         busy_up = I915_READ(RCPREVBSYTUPAVG);
     960           0 :         busy_down = I915_READ(RCPREVBSYTDNAVG);
     961           0 :         max_avg = I915_READ(RCBMAXAVG);
     962           0 :         min_avg = I915_READ(RCBMINAVG);
     963             : 
     964             :         /* Handle RCS change request from hw */
     965           0 :         if (busy_up > max_avg) {
     966           0 :                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
     967           0 :                         new_delay = dev_priv->ips.cur_delay - 1;
     968           0 :                 if (new_delay < dev_priv->ips.max_delay)
     969           0 :                         new_delay = dev_priv->ips.max_delay;
     970           0 :         } else if (busy_down < min_avg) {
     971           0 :                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
     972           0 :                         new_delay = dev_priv->ips.cur_delay + 1;
     973           0 :                 if (new_delay > dev_priv->ips.min_delay)
     974           0 :                         new_delay = dev_priv->ips.min_delay;
     975             :         }
     976             : 
     977           0 :         if (ironlake_set_drps(dev, new_delay))
     978           0 :                 dev_priv->ips.cur_delay = new_delay;
     979             : 
     980           0 :         spin_unlock(&mchdev_lock);
     981             : 
     982             :         return;
     983           0 : }
     984             : 
/* Wake all waiters sleeping on @ring's irq_queue (no-op if the ring
 * was never initialized). */
static void notify_ring(struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_notify(ring);

        wake_up_all(&ring->irq_queue);
}
     994             : 
/* Snapshot the punit CZ timestamp plus the render and media C0
 * residency counters into @ei for later comparison in vlv_wa_c0_ei(). */
static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
    1002             : 
/* Discard the stored C0 sample so the next vlv_wa_c0_ei() call primes a
 * fresh baseline instead of comparing against stale counters. */
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
    1007             : 
/*
 * VLV workaround: when the RP up-EI-expired interrupt fires, derive
 * synthetic up/down threshold events by comparing combined render+media
 * C0 residency against czclk-scaled elapsed time since the previous
 * sample stored in dev_priv->rps.ei.
 *
 * Returns GEN6_PM_RP_UP_THRESHOLD, GEN6_PM_RP_DOWN_THRESHOLD, or 0.
 */
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        const struct intel_rps_ei *prev = &dev_priv->rps.ei;
        struct intel_rps_ei now;
        u32 events = 0;

        /* Only act on the EI-expired interrupt. */
        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);
        /* Bail if the timestamp reads back as zero. */
        if (now.cz_clock == 0)
                return 0;

        /* A zeroed prev sample (see gen6_rps_reset_ei()) only primes the
         * baseline; no event is generated for the first interval. */
        if (prev->cz_clock) {
                u64 time, c0;
                unsigned int mul;

                mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        mul <<= 8;

                time = now.cz_clock - prev->cz_clock;
                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                c0 = now.render_c0 - prev->render_c0;
                c0 += now.media_c0 - prev->media_c0;
                c0 *= mul;

                /* Busy fraction above/below the RPS thresholds? */
                if (c0 > time * dev_priv->rps.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * dev_priv->rps.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        dev_priv->rps.ei = now;
        return events;
}
    1050             : 
/* Return true if any engine has a non-zero irq refcount, i.e. someone
 * is presumably waiting on it. */
static bool any_waiters(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                if (ring->irq_refcount)
                        return true;

        return false;
}
    1062             : 
    1063           0 : static void gen6_pm_rps_work(struct work_struct *work)
    1064             : {
    1065             :         struct drm_i915_private *dev_priv =
    1066           0 :                 container_of(work, struct drm_i915_private, rps.work);
    1067             :         bool client_boost;
    1068             :         int new_delay, adj, min, max;
    1069             :         u32 pm_iir;
    1070             : 
    1071           0 :         spin_lock_irq(&dev_priv->irq_lock);
    1072             :         /* Speed up work cancelation during disabling rps interrupts. */
    1073           0 :         if (!dev_priv->rps.interrupts_enabled) {
    1074           0 :                 spin_unlock_irq(&dev_priv->irq_lock);
    1075           0 :                 return;
    1076             :         }
    1077           0 :         pm_iir = dev_priv->rps.pm_iir;
    1078           0 :         dev_priv->rps.pm_iir = 0;
    1079             :         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
    1080           0 :         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
    1081           0 :         client_boost = dev_priv->rps.client_boost;
    1082           0 :         dev_priv->rps.client_boost = false;
    1083           0 :         spin_unlock_irq(&dev_priv->irq_lock);
    1084             : 
    1085             :         /* Make sure we didn't queue anything we're not going to process. */
    1086           0 :         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
    1087             : 
    1088           0 :         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
    1089           0 :                 return;
    1090             : 
    1091           0 :         mutex_lock(&dev_priv->rps.hw_lock);
    1092             : 
    1093           0 :         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
    1094             : 
    1095           0 :         adj = dev_priv->rps.last_adj;
    1096           0 :         new_delay = dev_priv->rps.cur_freq;
    1097           0 :         min = dev_priv->rps.min_freq_softlimit;
    1098           0 :         max = dev_priv->rps.max_freq_softlimit;
    1099             : 
    1100           0 :         if (client_boost) {
    1101             :                 new_delay = dev_priv->rps.max_freq_softlimit;
    1102             :                 adj = 0;
    1103           0 :         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
    1104           0 :                 if (adj > 0)
    1105           0 :                         adj *= 2;
    1106             :                 else /* CHV needs even encode values */
    1107           0 :                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
    1108             :                 /*
    1109             :                  * For better performance, jump directly
    1110             :                  * to RPe if we're below it.
    1111             :                  */
    1112           0 :                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
    1113             :                         new_delay = dev_priv->rps.efficient_freq;
    1114             :                         adj = 0;
    1115           0 :                 }
    1116           0 :         } else if (any_waiters(dev_priv)) {
    1117             :                 adj = 0;
    1118           0 :         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
    1119           0 :                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
    1120           0 :                         new_delay = dev_priv->rps.efficient_freq;
    1121             :                 else
    1122           0 :                         new_delay = dev_priv->rps.min_freq_softlimit;
    1123             :                 adj = 0;
    1124           0 :         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
    1125           0 :                 if (adj < 0)
    1126           0 :                         adj *= 2;
    1127             :                 else /* CHV needs even encode values */
    1128           0 :                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
    1129             :         } else { /* unknown event */
    1130             :                 adj = 0;
    1131             :         }
    1132             : 
    1133           0 :         dev_priv->rps.last_adj = adj;
    1134             : 
    1135             :         /* sysfs frequency interfaces may have snuck in while servicing the
    1136             :          * interrupt
    1137             :          */
    1138           0 :         new_delay += adj;
    1139           0 :         new_delay = clamp_t(int, new_delay, min, max);
    1140             : 
    1141           0 :         intel_set_rps(dev_priv->dev, new_delay);
    1142             : 
    1143           0 :         mutex_unlock(&dev_priv->rps.hw_lock);
    1144           0 : }
    1145             : 
    1146             : 
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        /* Recover the owning device from the embedded work item. */
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
#ifdef __linux__
        /* uevent environment strings; only emitted on the Linux build. */
        char *parity_event[6];
#endif
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        /* Save clock gating state so it can be restored after the loop. */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        /* Process every slice flagged by the interrupt handler. */
        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                /* ffs() is 1-based; convert to a 0-based slice index. */
                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                /* Clear the pending bit for this slice before handling it. */
                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                /* Per-slice error-status registers are 0x200 bytes apart. */
                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                /* Decode the failing row/bank/subbank from the status word. */
                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                /* Ack the error and re-arm detection for this slice. */
                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

#ifdef __linux__
                /* Notify userspace so it can remap the bad row (Linux only). */
                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);
#endif

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

#ifdef __linux__
                /* Free the kasprintf() allocations; [0] is a literal. */
                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
#endif
        }

        /* Restore the saved DOP clock gating configuration. */
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        /* All slices should have been drained by the loop above. */
        WARN_ON(dev_priv->l3_parity.which_slice);
        /* Re-enable the parity interrupt that the IRQ handler masked. */
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}
    1233             : 
    1234           0 : static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
    1235             : {
    1236           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1237             : 
    1238           0 :         if (!HAS_L3_DPF(dev))
    1239           0 :                 return;
    1240             : 
    1241           0 :         spin_lock(&dev_priv->irq_lock);
    1242           0 :         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
    1243           0 :         spin_unlock(&dev_priv->irq_lock);
    1244             : 
    1245           0 :         iir &= GT_PARITY_ERROR(dev);
    1246           0 :         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
    1247           0 :                 dev_priv->l3_parity.which_slice |= 1 << 1;
    1248             : 
    1249           0 :         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
    1250           0 :                 dev_priv->l3_parity.which_slice |= 1 << 0;
    1251             : 
    1252           0 :         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
    1253           0 : }
    1254             : 
    1255           0 : static void ilk_gt_irq_handler(struct drm_device *dev,
    1256             :                                struct drm_i915_private *dev_priv,
    1257             :                                u32 gt_iir)
    1258             : {
    1259           0 :         if (gt_iir &
    1260             :             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
    1261           0 :                 notify_ring(&dev_priv->ring[RCS]);
    1262           0 :         if (gt_iir & ILK_BSD_USER_INTERRUPT)
    1263           0 :                 notify_ring(&dev_priv->ring[VCS]);
    1264           0 : }
    1265             : 
    1266           0 : static void snb_gt_irq_handler(struct drm_device *dev,
    1267             :                                struct drm_i915_private *dev_priv,
    1268             :                                u32 gt_iir)
    1269             : {
    1270             : 
    1271           0 :         if (gt_iir &
    1272             :             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
    1273           0 :                 notify_ring(&dev_priv->ring[RCS]);
    1274           0 :         if (gt_iir & GT_BSD_USER_INTERRUPT)
    1275           0 :                 notify_ring(&dev_priv->ring[VCS]);
    1276           0 :         if (gt_iir & GT_BLT_USER_INTERRUPT)
    1277           0 :                 notify_ring(&dev_priv->ring[BCS]);
    1278             : 
    1279           0 :         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
    1280             :                       GT_BSD_CS_ERROR_INTERRUPT |
    1281             :                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
    1282             :                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
    1283             : 
    1284           0 :         if (gt_iir & GT_PARITY_ERROR(dev))
    1285           0 :                 ivybridge_parity_error_irq_handler(dev, gt_iir);
    1286           0 : }
    1287             : 
    1288           0 : static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
    1289             :                                        u32 master_ctl)
    1290             : {
    1291             :         irqreturn_t ret = IRQ_NONE;
    1292             : 
    1293           0 :         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
    1294           0 :                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
    1295           0 :                 if (tmp) {
    1296           0 :                         I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
    1297             :                         ret = IRQ_HANDLED;
    1298             : 
    1299           0 :                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
    1300           0 :                                 intel_lrc_irq_handler(&dev_priv->ring[RCS]);
    1301           0 :                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
    1302           0 :                                 notify_ring(&dev_priv->ring[RCS]);
    1303             : 
    1304           0 :                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
    1305           0 :                                 intel_lrc_irq_handler(&dev_priv->ring[BCS]);
    1306           0 :                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
    1307           0 :                                 notify_ring(&dev_priv->ring[BCS]);
    1308             :                 } else
    1309           0 :                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
    1310           0 :         }
    1311             : 
    1312           0 :         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
    1313           0 :                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
    1314           0 :                 if (tmp) {
    1315           0 :                         I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
    1316             :                         ret = IRQ_HANDLED;
    1317             : 
    1318           0 :                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
    1319           0 :                                 intel_lrc_irq_handler(&dev_priv->ring[VCS]);
    1320           0 :                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
    1321           0 :                                 notify_ring(&dev_priv->ring[VCS]);
    1322             : 
    1323           0 :                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
    1324           0 :                                 intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
    1325           0 :                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
    1326           0 :                                 notify_ring(&dev_priv->ring[VCS2]);
    1327             :                 } else
    1328           0 :                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
    1329           0 :         }
    1330             : 
    1331           0 :         if (master_ctl & GEN8_GT_VECS_IRQ) {
    1332           0 :                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
    1333           0 :                 if (tmp) {
    1334           0 :                         I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
    1335             :                         ret = IRQ_HANDLED;
    1336             : 
    1337           0 :                         if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
    1338           0 :                                 intel_lrc_irq_handler(&dev_priv->ring[VECS]);
    1339           0 :                         if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
    1340           0 :                                 notify_ring(&dev_priv->ring[VECS]);
    1341             :                 } else
    1342           0 :                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
    1343           0 :         }
    1344             : 
    1345           0 :         if (master_ctl & GEN8_GT_PM_IRQ) {
    1346           0 :                 u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
    1347           0 :                 if (tmp & dev_priv->pm_rps_events) {
    1348           0 :                         I915_WRITE_FW(GEN8_GT_IIR(2),
    1349             :                                       tmp & dev_priv->pm_rps_events);
    1350             :                         ret = IRQ_HANDLED;
    1351           0 :                         gen6_rps_irq_handler(dev_priv, tmp);
    1352           0 :                 } else
    1353           0 :                         DRM_ERROR("The master control interrupt lied (PM)!\n");
    1354           0 :         }
    1355             : 
    1356           0 :         return ret;
    1357             : }
    1358             : 
    1359           0 : static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
    1360             : {
    1361           0 :         switch (port) {
    1362             :         case PORT_A:
    1363           0 :                 return val & PORTA_HOTPLUG_LONG_DETECT;
    1364             :         case PORT_B:
    1365           0 :                 return val & PORTB_HOTPLUG_LONG_DETECT;
    1366             :         case PORT_C:
    1367           0 :                 return val & PORTC_HOTPLUG_LONG_DETECT;
    1368             :         default:
    1369           0 :                 return false;
    1370             :         }
    1371           0 : }
    1372             : 
    1373           0 : static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
    1374             : {
    1375           0 :         switch (port) {
    1376             :         case PORT_E:
    1377           0 :                 return val & PORTE_HOTPLUG_LONG_DETECT;
    1378             :         default:
    1379           0 :                 return false;
    1380             :         }
    1381           0 : }
    1382             : 
    1383           0 : static bool spt_port_hotplug_long_detect(enum port port, u32 val)
    1384             : {
    1385           0 :         switch (port) {
    1386             :         case PORT_A:
    1387           0 :                 return val & PORTA_HOTPLUG_LONG_DETECT;
    1388             :         case PORT_B:
    1389           0 :                 return val & PORTB_HOTPLUG_LONG_DETECT;
    1390             :         case PORT_C:
    1391           0 :                 return val & PORTC_HOTPLUG_LONG_DETECT;
    1392             :         case PORT_D:
    1393           0 :                 return val & PORTD_HOTPLUG_LONG_DETECT;
    1394             :         default:
    1395           0 :                 return false;
    1396             :         }
    1397           0 : }
    1398             : 
    1399           0 : static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
    1400             : {
    1401           0 :         switch (port) {
    1402             :         case PORT_A:
    1403           0 :                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
    1404             :         default:
    1405           0 :                 return false;
    1406             :         }
    1407           0 : }
    1408             : 
    1409           0 : static bool pch_port_hotplug_long_detect(enum port port, u32 val)
    1410             : {
    1411           0 :         switch (port) {
    1412             :         case PORT_B:
    1413           0 :                 return val & PORTB_HOTPLUG_LONG_DETECT;
    1414             :         case PORT_C:
    1415           0 :                 return val & PORTC_HOTPLUG_LONG_DETECT;
    1416             :         case PORT_D:
    1417           0 :                 return val & PORTD_HOTPLUG_LONG_DETECT;
    1418             :         default:
    1419           0 :                 return false;
    1420             :         }
    1421           0 : }
    1422             : 
    1423           0 : static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
    1424             : {
    1425           0 :         switch (port) {
    1426             :         case PORT_B:
    1427           0 :                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
    1428             :         case PORT_C:
    1429           0 :                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
    1430             :         case PORT_D:
    1431           0 :                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
    1432             :         default:
    1433           0 :                 return false;
    1434             :         }
    1435           0 : }
    1436             : 
    1437             : /*
    1438             :  * Get a bit mask of pins that have triggered, and which ones may be long.
    1439             :  * This can be called multiple times with the same masks to accumulate
    1440             :  * hotplug detection results from several registers.
    1441             :  *
    1442             :  * Note that the caller is expected to zero out the masks initially.
    1443             :  */
    1444           0 : static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
    1445             :                              u32 hotplug_trigger, u32 dig_hotplug_reg,
    1446             :                              const u32 hpd[HPD_NUM_PINS],
    1447             :                              bool long_pulse_detect(enum port port, u32 val))
    1448             : {
    1449           0 :         enum port port;
    1450             :         int i;
    1451             : 
    1452           0 :         for_each_hpd_pin(i) {
    1453           0 :                 if ((hpd[i] & hotplug_trigger) == 0)
    1454             :                         continue;
    1455             : 
    1456           0 :                 *pin_mask |= BIT(i);
    1457             : 
    1458           0 :                 if (!intel_hpd_pin_to_port(i, &port))
    1459             :                         continue;
    1460             : 
    1461           0 :                 if (long_pulse_detect(port, dig_hotplug_reg))
    1462           0 :                         *long_mask |= BIT(i);
    1463             :         }
    1464             : 
    1465             :         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
    1466             :                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
    1467             : 
    1468           0 : }
    1469             : 
    1470           0 : static void gmbus_irq_handler(struct drm_device *dev)
    1471             : {
    1472           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1473             : 
    1474           0 :         wake_up_all(&dev_priv->gmbus_wait_queue);
    1475           0 : }
    1476             : 
    1477           0 : static void dp_aux_irq_handler(struct drm_device *dev)
    1478             : {
    1479           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1480             : 
    1481           0 :         wake_up_all(&dev_priv->gmbus_wait_queue);
    1482           0 : }
    1483             : 
#if defined(CONFIG_DEBUG_FS)
/* Push one set of pipe CRC results into the per-pipe circular buffer for
 * the debugfs CRC interface, stamped with the current vblank frame, and
 * wake any reader. Runs in IRQ context; drops events if the buffer is
 * not set up or is full. */
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;

        spin_lock(&pipe_crc->lock);

        /* CRC capture not enabled for this pipe: drop the event. */
        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
                DRM_DEBUG_KMS("spurious interrupt\n");
                return;
        }

        head = pipe_crc->head;
        tail = pipe_crc->tail;

        /* Buffer full: drop the event rather than overwrite unread data. */
        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("CRC buffer overflowing\n");
                return;
        }

        entry = &pipe_crc->entries[head];

        /* Stamp the entry with the frame counter so userspace can
         * correlate CRCs with frames. */
        entry->frame = dev->driver->get_vblank_counter(dev, pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
        entry->crc[3] = crc3;
        entry->crc[4] = crc4;

        /* Advance head; entry count is a power of two, so mask wraps. */
        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
        pipe_crc->head = head;

        spin_unlock(&pipe_crc->lock);

        /* Wake any debugfs reader blocked on new CRC data. */
        wake_up_interruptible(&pipe_crc->wq);
}
#else
/* Without CONFIG_DEBUG_FS there is no CRC consumer: no-op stub. */
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif
    1535             : 
    1536             : 
    1537           0 : static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
    1538             : {
    1539           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1540             : 
    1541           0 :         display_pipe_crc_irq_handler(dev, pipe,
    1542           0 :                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
    1543             :                                      0, 0, 0, 0);
    1544           0 : }
    1545             : 
    1546           0 : static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
    1547             : {
    1548           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1549             : 
    1550           0 :         display_pipe_crc_irq_handler(dev, pipe,
    1551           0 :                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
    1552           0 :                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
    1553           0 :                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
    1554           0 :                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
    1555           0 :                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
    1556           0 : }
    1557             : 
    1558           0 : static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
    1559             : {
    1560           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1561             :         uint32_t res1, res2;
    1562             : 
    1563           0 :         if (INTEL_INFO(dev)->gen >= 3)
    1564           0 :                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
    1565             :         else
    1566             :                 res1 = 0;
    1567             : 
    1568           0 :         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
    1569           0 :                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
    1570             :         else
    1571             :                 res2 = 0;
    1572             : 
    1573           0 :         display_pipe_crc_irq_handler(dev, pipe,
    1574           0 :                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
    1575           0 :                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
    1576           0 :                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
    1577             :                                      res1, res2);
    1578           0 : }
    1579             : 
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                /* Mask the RPS bits so they can't refire before the work
                 * item has processed (and re-enabled) them. */
                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                if (dev_priv->rps.interrupts_enabled) {
                        /* Accumulate pending events for the work handler
                         * under irq_lock. */
                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
                        queue_work(dev_priv->wq, &dev_priv->rps.work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }

        /* On gen8+ the VECS bits are handled in gen8_gt_irq_handler, so
         * there is nothing further to do here. */
        if (INTEL_INFO(dev_priv)->gen >= 8)
                return;

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(&dev_priv->ring[VECS]);

                /* VEBOX command parser errors are only logged. */
                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
        }
}
    1606             : 
    1607           0 : static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
    1608             : {
    1609           0 :         if (!drm_handle_vblank(dev, pipe))
    1610           0 :                 return false;
    1611             : 
    1612           0 :         return true;
    1613           0 : }
    1614             : 
/*
 * Handle the per-pipe PIPESTAT portion of a VLV/CHV display interrupt.
 *
 * @iir is the already-read VLV_IIR value; it is only consulted to decide
 * which pipes' enabled status bits to consider.  The PIPESTAT registers
 * are read and acked under irq_lock, then the latched bits are dispatched
 * (vblank/page-flip, CRC, FIFO underrun, GMBUS) outside the lock.
 */
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	/* irq_lock protects pipestat_irq_mask and the PIPESTAT registers. */
	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		/* Map the pipe to its per-pipe event bit in IIR. */
		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only look at the enabled status bits when IIR flagged the pipe. */
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	/* Dispatch the latched events now that irq_lock is dropped. */
	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is reported via pipe A's PIPESTAT register. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
    1687             : 
/*
 * Handle a hotplug interrupt on gen2-4 / VLV style hardware.
 *
 * Latches PORT_HOTPLUG_STAT, acks it (before the caller clears IIR, see
 * comment below), then translates the triggered bits into HPD pins using
 * the G4X/VLV or pre-G4X status layout and forwards them to the core HPD
 * handler.  G4X/VLV additionally report DP AUX completion here.
 */
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		/* G4X/VLV also signal DP AUX completion via the hotplug register. */
		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}
    1728             : 
/*
 * Top-level interrupt handler for Valleyview.
 *
 * Loops until GT (GTIIR), PM (GEN6_PMIIR) and display (VLV_IIR) interrupt
 * identity registers all read zero.  Each iteration clears the IIRs first,
 * then processes the captured bits, so events arriving mid-processing are
 * picked up on the next pass rather than lost.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		/* Nothing pending in any source: we're done. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
    1775             : 
/*
 * Top-level interrupt handler for Cherryview.
 *
 * Uses the gen8-style master interrupt control: each iteration disables
 * GEN8_MASTER_IRQ, consumes and clears the display IIR (port events first,
 * see comment below), dispatches GT interrupts via the master control
 * bits, and finally re-enables the master interrupt.  Loops until both
 * master control and VLV_IIR read zero.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Disable the master interrupt while we process this pass. */
		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		/* Re-arm the master interrupt and flush the write. */
		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
    1818             : 
    1819           0 : static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
    1820             :                                 const u32 hpd[HPD_NUM_PINS])
    1821             : {
    1822           0 :         struct drm_i915_private *dev_priv = to_i915(dev);
    1823           0 :         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
    1824             : 
    1825           0 :         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
    1826           0 :         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
    1827             : 
    1828           0 :         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
    1829             :                            dig_hotplug_reg, hpd,
    1830             :                            pch_port_hotplug_long_detect);
    1831             : 
    1832           0 :         intel_hpd_irq_handler(dev, pin_mask, long_mask);
    1833           0 : }
    1834             : 
    1835           0 : static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
    1836             : {
    1837           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1838             :         int pipe;
    1839           0 :         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
    1840             : 
    1841           0 :         if (hotplug_trigger)
    1842           0 :                 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
    1843             : 
    1844             : #ifdef __linux__
    1845             :         if (pch_iir & SDE_AUDIO_POWER_MASK) {
    1846             :                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
    1847             :                                SDE_AUDIO_POWER_SHIFT);
    1848             :                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
    1849             :                                  port_name(port));
    1850             :         }
    1851             : #endif
    1852             : 
    1853           0 :         if (pch_iir & SDE_AUX_MASK)
    1854           0 :                 dp_aux_irq_handler(dev);
    1855             : 
    1856           0 :         if (pch_iir & SDE_GMBUS)
    1857           0 :                 gmbus_irq_handler(dev);
    1858             : 
    1859           0 :         if (pch_iir & SDE_AUDIO_HDCP_MASK)
    1860             :                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
    1861             : 
    1862           0 :         if (pch_iir & SDE_AUDIO_TRANS_MASK)
    1863             :                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
    1864             : 
    1865           0 :         if (pch_iir & SDE_POISON)
    1866           0 :                 DRM_ERROR("PCH poison interrupt\n");
    1867             : 
    1868           0 :         if (pch_iir & SDE_FDI_MASK)
    1869           0 :                 for_each_pipe(dev_priv, pipe)
    1870             :                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
    1871             :                                          pipe_name(pipe),
    1872             :                                          I915_READ(FDI_RX_IIR(pipe)));
    1873             : 
    1874           0 :         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
    1875             :                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
    1876             : 
    1877           0 :         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
    1878             :                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
    1879             : 
    1880           0 :         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
    1881           0 :                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
    1882             : 
    1883           0 :         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
    1884           0 :                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
    1885           0 : }
    1886             : 
/*
 * Handle the gen7 north display error interrupt (GEN7_ERR_INT):
 * report poison, dispatch per-pipe FIFO underruns and CRC-done events
 * (IVB vs. HSW use different CRC handlers), then ack the register.
 */
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	/* Ack by writing back the latched status bits. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
    1910             : 
    1911           0 : static void cpt_serr_int_handler(struct drm_device *dev)
    1912             : {
    1913           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1914           0 :         u32 serr_int = I915_READ(SERR_INT);
    1915             : 
    1916           0 :         if (serr_int & SERR_INT_POISON)
    1917           0 :                 DRM_ERROR("PCH poison interrupt\n");
    1918             : 
    1919           0 :         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
    1920           0 :                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
    1921             : 
    1922           0 :         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
    1923           0 :                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
    1924             : 
    1925           0 :         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
    1926           0 :                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
    1927             : 
    1928           0 :         I915_WRITE(SERR_INT, serr_int);
    1929           0 : }
    1930             : 
/*
 * Dispatch a south display engine (CPT/PPT PCH) interrupt from @pch_iir:
 * hotplug, audio power (debug, Linux only), DP AUX, GMBUS, audio content
 * protection (debug), FDI (debug) and south error events.
 */
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

#ifdef __linux__
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}
#endif

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
    1970             : 
/*
 * Dispatch a Sunrise Point (SPT) PCH interrupt from @pch_iir.
 *
 * SPT splits hotplug status across two registers: PCH_PORT_HOTPLUG for
 * ports A-D and PCH_PORT_HOTPLUG2 for port E.  Each is latched and acked
 * separately, the decoded pins are merged into one pin/long mask pair,
 * and a single call is made to the core HPD handler.  GMBUS is also
 * reported here.
 */
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Latch and ack the first hotplug status register. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		/* Latch and ack the second (port E) hotplug status register. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}
    2007             : 
    2008           0 : static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
    2009             :                                 const u32 hpd[HPD_NUM_PINS])
    2010             : {
    2011           0 :         struct drm_i915_private *dev_priv = to_i915(dev);
    2012           0 :         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
    2013             : 
    2014           0 :         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
    2015           0 :         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
    2016             : 
    2017           0 :         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
    2018             :                            dig_hotplug_reg, hpd,
    2019             :                            ilk_port_hotplug_long_detect);
    2020             : 
    2021           0 :         intel_hpd_irq_handler(dev, pin_mask, long_mask);
    2022           0 : }
    2023             : 
/*
 * Dispatch an Ironlake/Sandybridge display interrupt from @de_iir:
 * DP A hotplug, AUX, opregion ASLE, poison, per-pipe vblank/underrun/
 * CRC/flip-done events, chained PCH events (SDEIIR) and the gen5-only
 * PCU (RPS) event.
 */
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
    2076             : 
/*
 * Dispatch an Ivybridge/Haswell display interrupt from @de_iir: DP A
 * hotplug, error interrupt, AUX, opregion ASLE, per-pipe vblank and
 * flip-done events, and chained PCH (CPT) events via SDEIIR.
 */
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
    2117             : 
    2118             : /*
    2119             :  * To handle irqs with the minimum potential races with fresh interrupts, we:
    2120             :  * 1 - Disable Master Interrupt Control.
    2121             :  * 2 - Find the source(s) of the interrupt.
    2122             :  * 3 - Clear the Interrupt Identity bits (IIR).
    2123             :  * 4 - Process the interrupt(s) that had bits set in the IIRs.
    2124             :  * 5 - Re-enable Master Interrupt Control.
    2125             :  */
/*
 * Top-level interrupt handler for Ironlake through Haswell.  See the
 * race-avoidance protocol described in the comment block above: master
 * and south interrupts are disabled for the duration, each IIR is
 * cleared before its bits are processed, and both are re-enabled at
 * the end so queued south interrupts re-fire.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	/* PM (RPS) interrupts exist from gen6 onwards. */
	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	/* Restore master and south interrupt enables. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
    2196             : 
    2197           0 : static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
    2198             :                                 const u32 hpd[HPD_NUM_PINS])
    2199             : {
    2200           0 :         struct drm_i915_private *dev_priv = to_i915(dev);
    2201           0 :         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
    2202             : 
    2203           0 :         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
    2204           0 :         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
    2205             : 
    2206           0 :         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
    2207             :                            dig_hotplug_reg, hpd,
    2208             :                            bxt_port_hotplug_long_detect);
    2209             : 
    2210           0 :         intel_hpd_irq_handler(dev, pin_mask, long_mask);
    2211           0 : }
    2212             : 
    2213           0 : static irqreturn_t gen8_irq_handler(int irq, void *arg)
    2214             : {
    2215           0 :         struct drm_device *dev = arg;
    2216           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2217             :         u32 master_ctl;
    2218             :         irqreturn_t ret = IRQ_NONE;
    2219             :         uint32_t tmp = 0;
    2220             :         enum pipe pipe;
    2221             :         u32 aux_mask = GEN8_AUX_CHANNEL_A;
    2222             : 
    2223           0 :         if (!intel_irqs_enabled(dev_priv))
    2224           0 :                 return IRQ_NONE;
    2225             : 
    2226           0 :         if (INTEL_INFO(dev_priv)->gen >= 9)
    2227           0 :                 aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
    2228             :                         GEN9_AUX_CHANNEL_D;
    2229             : 
    2230           0 :         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
    2231           0 :         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
    2232           0 :         if (!master_ctl)
    2233           0 :                 return IRQ_NONE;
    2234             : 
    2235           0 :         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
    2236             : 
    2237             :         /* Find, clear, then process each source of interrupt */
    2238             : 
    2239           0 :         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
    2240             : 
    2241           0 :         if (master_ctl & GEN8_DE_MISC_IRQ) {
    2242           0 :                 tmp = I915_READ(GEN8_DE_MISC_IIR);
    2243           0 :                 if (tmp) {
    2244           0 :                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
    2245             :                         ret = IRQ_HANDLED;
    2246           0 :                         if (tmp & GEN8_DE_MISC_GSE)
    2247           0 :                                 intel_opregion_asle_intr(dev);
    2248             :                         else
    2249           0 :                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
    2250             :                 }
    2251             :                 else
    2252           0 :                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
    2253             :         }
    2254             : 
    2255           0 :         if (master_ctl & GEN8_DE_PORT_IRQ) {
    2256           0 :                 tmp = I915_READ(GEN8_DE_PORT_IIR);
    2257           0 :                 if (tmp) {
    2258             :                         bool found = false;
    2259             :                         u32 hotplug_trigger = 0;
    2260             : 
    2261           0 :                         if (IS_BROXTON(dev_priv))
    2262           0 :                                 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
    2263           0 :                         else if (IS_BROADWELL(dev_priv))
    2264           0 :                                 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
    2265             : 
    2266           0 :                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
    2267             :                         ret = IRQ_HANDLED;
    2268             : 
    2269           0 :                         if (tmp & aux_mask) {
    2270           0 :                                 dp_aux_irq_handler(dev);
    2271             :                                 found = true;
    2272           0 :                         }
    2273             : 
    2274           0 :                         if (hotplug_trigger) {
    2275           0 :                                 if (IS_BROXTON(dev))
    2276           0 :                                         bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
    2277             :                                 else
    2278           0 :                                         ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
    2279             :                                 found = true;
    2280           0 :                         }
    2281             : 
    2282           0 :                         if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
    2283           0 :                                 gmbus_irq_handler(dev);
    2284             :                                 found = true;
    2285           0 :                         }
    2286             : 
    2287           0 :                         if (!found)
    2288           0 :                                 DRM_ERROR("Unexpected DE Port interrupt\n");
    2289           0 :                 }
    2290             :                 else
    2291           0 :                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
    2292             :         }
    2293             : 
    2294           0 :         for_each_pipe(dev_priv, pipe) {
    2295             :                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
    2296             : 
    2297           0 :                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
    2298           0 :                         continue;
    2299             : 
    2300           0 :                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
    2301           0 :                 if (pipe_iir) {
    2302             :                         ret = IRQ_HANDLED;
    2303           0 :                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
    2304             : 
    2305           0 :                         if (pipe_iir & GEN8_PIPE_VBLANK &&
    2306           0 :                             intel_pipe_handle_vblank(dev, pipe))
    2307           0 :                                 intel_check_page_flip(dev, pipe);
    2308             : 
    2309           0 :                         if (INTEL_INFO(dev_priv)->gen >= 9)
    2310           0 :                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
    2311             :                         else
    2312           0 :                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
    2313             : 
    2314           0 :                         if (flip_done) {
    2315           0 :                                 intel_prepare_page_flip(dev, pipe);
    2316           0 :                                 intel_finish_page_flip_plane(dev, pipe);
    2317           0 :                         }
    2318             : 
    2319           0 :                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
    2320           0 :                                 hsw_pipe_crc_irq_handler(dev, pipe);
    2321             : 
    2322           0 :                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
    2323           0 :                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
    2324             :                                                                     pipe);
    2325             : 
    2326             : 
    2327           0 :                         if (INTEL_INFO(dev_priv)->gen >= 9)
    2328           0 :                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
    2329             :                         else
    2330           0 :                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
    2331             : 
    2332           0 :                         if (fault_errors)
    2333           0 :                                 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
    2334             :                                           pipe_name(pipe),
    2335             :                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
    2336             :                 } else
    2337           0 :                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
    2338           0 :         }
    2339             : 
    2340           0 :         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
    2341           0 :             master_ctl & GEN8_DE_PCH_IRQ) {
    2342             :                 /*
    2343             :                  * FIXME(BDW): Assume for now that the new interrupt handling
    2344             :                  * scheme also closed the SDE interrupt handling race we've seen
    2345             :                  * on older pch-split platforms. But this needs testing.
    2346             :                  */
    2347           0 :                 u32 pch_iir = I915_READ(SDEIIR);
    2348           0 :                 if (pch_iir) {
    2349           0 :                         I915_WRITE(SDEIIR, pch_iir);
    2350             :                         ret = IRQ_HANDLED;
    2351             : 
    2352           0 :                         if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
    2353           0 :                                 spt_irq_handler(dev, pch_iir);
    2354             :                         else
    2355           0 :                                 cpt_irq_handler(dev, pch_iir);
    2356             :                 } else {
    2357             :                         /*
    2358             :                          * Like on previous PCH there seems to be something
    2359             :                          * fishy going on with forwarding PCH interrupts.
    2360             :                          */
    2361             :                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
    2362             :                 }
    2363           0 :         }
    2364             : 
    2365           0 :         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
    2366           0 :         POSTING_READ_FW(GEN8_MASTER_IRQ);
    2367             : 
    2368           0 :         return ret;
    2369           0 : }
    2370             : 
    2371           0 : static void i915_error_wake_up(struct drm_i915_private *dev_priv,
    2372             :                                bool reset_completed)
    2373             : {
    2374             :         struct intel_engine_cs *ring;
    2375             :         int i;
    2376             : 
    2377             :         /*
    2378             :          * Notify all waiters for GPU completion events that reset state has
    2379             :          * been changed, and that they need to restart their wait after
    2380             :          * checking for potential errors (and bail out to drop locks if there is
    2381             :          * a gpu reset pending so that i915_error_work_func can acquire them).
    2382             :          */
    2383             : 
    2384             :         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
    2385           0 :         for_each_ring(ring, dev_priv, i)
    2386           0 :                 wake_up_all(&ring->irq_queue);
    2387             : 
    2388             :         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
    2389           0 :         wake_up_all(&dev_priv->pending_flip_queue);
    2390             : 
    2391             :         /*
    2392             :          * Signal tasks blocked in i915_gem_wait_for_error that the pending
    2393             :          * reset state is cleared.
    2394             :          */
    2395           0 :         if (reset_completed)
    2396           0 :                 wake_up_all(&dev_priv->gpu_error.reset_queue);
    2397           0 : }
    2398             : 
/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.  If a reset is pending and the GPU is not already
 * terminally wedged, perform the actual GPU reset here in process
 * context, update the reset counter, and wake all waiters so they
 * observe the new reset state.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_gpu_error *error = &dev_priv->gpu_error;
#ifdef __linux__
        /* Uevent payloads: error detected, reset started, reset done. */
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
#endif
        int ret;

#ifdef __linux__
        /* Uevents are Linux-only; this port has no userspace consumer. */
        kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
#endif

        /*
         * Note that there's only one work item which does gpu resets, so we
         * need not worry about concurrent gpu resets potentially incrementing
         * error->reset_counter twice. We only need to take care of another
         * racing irq/hangcheck declaring the gpu dead for a second time. A
         * quick check for that is good enough: schedule_work ensures the
         * correct ordering between hang detection and this work item, and since
         * the reset in-progress bit is only ever set by code outside of this
         * work we don't need to worry about any other races.
         */
        if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
#ifdef __linux__
                kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
                                   reset_event);
#endif

                /*
                 * In most cases it's guaranteed that we get here with an RPM
                 * reference held, for example because there is a pending GPU
                 * request that won't finish until the reset is done. This
                 * isn't the case at least when we get here by doing a
                 * simulated reset via debugs, so get an RPM reference.
                 */
                intel_runtime_pm_get(dev_priv);

                intel_prepare_reset(dev);

                /*
                 * All state reset _must_ be completed before we update the
                 * reset counter, for otherwise waiters might miss the reset
                 * pending state and not properly drop locks, resulting in
                 * deadlocks with the reset work.
                 */
                ret = i915_reset(dev);        /* 0 on success */

                intel_finish_reset(dev);

                intel_runtime_pm_put(dev_priv);

                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
                         * counter and wake up everyone waiting for the reset to
                         * complete.
                         *
                         * Since unlock operations are a one-sided barrier only,
                         * we need to insert a barrier here to order any seqno
                         * updates before
                         * the counter increment.
                         */
                        smp_mb__before_atomic();
                        atomic_inc(&dev_priv->gpu_error.reset_counter);

#ifdef __linux__
                        kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
#endif
                } else {
                        /* Reset failed: mark the GPU terminally wedged. */
                        atomic_or(I915_WEDGED, &error->reset_counter);
                }

                /*
                 * Note: The wake_up also serves as a memory barrier so that
                 * waiters see the update value of the reset counter atomic_t.
                 */
                i915_error_wake_up(dev_priv, true);
        }
}
    2490             : 
    2491           0 : static void i915_report_and_clear_eir(struct drm_device *dev)
    2492             : {
    2493           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2494           0 :         uint32_t instdone[I915_NUM_INSTDONE_REG];
    2495           0 :         u32 eir = I915_READ(EIR);
    2496             :         int pipe, i;
    2497             : 
    2498           0 :         if (!eir)
    2499           0 :                 return;
    2500             : 
    2501           0 :         pr_err("render error detected, EIR: 0x%08x\n", eir);
    2502             : 
    2503           0 :         i915_get_extra_instdone(dev, instdone);
    2504             : 
    2505           0 :         if (IS_G4X(dev)) {
    2506           0 :                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
    2507           0 :                         u32 ipeir = I915_READ(IPEIR_I965);
    2508             : 
    2509           0 :                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
    2510           0 :                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
    2511           0 :                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
    2512           0 :                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
    2513           0 :                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
    2514           0 :                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
    2515           0 :                         I915_WRITE(IPEIR_I965, ipeir);
    2516           0 :                         POSTING_READ(IPEIR_I965);
    2517           0 :                 }
    2518           0 :                 if (eir & GM45_ERROR_PAGE_TABLE) {
    2519           0 :                         u32 pgtbl_err = I915_READ(PGTBL_ER);
    2520           0 :                         pr_err("page table error\n");
    2521           0 :                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
    2522           0 :                         I915_WRITE(PGTBL_ER, pgtbl_err);
    2523           0 :                         POSTING_READ(PGTBL_ER);
    2524           0 :                 }
    2525             :         }
    2526             : 
    2527           0 :         if (!IS_GEN2(dev)) {
    2528           0 :                 if (eir & I915_ERROR_PAGE_TABLE) {
    2529           0 :                         u32 pgtbl_err = I915_READ(PGTBL_ER);
    2530           0 :                         pr_err("page table error\n");
    2531           0 :                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
    2532           0 :                         I915_WRITE(PGTBL_ER, pgtbl_err);
    2533           0 :                         POSTING_READ(PGTBL_ER);
    2534           0 :                 }
    2535             :         }
    2536             : 
    2537           0 :         if (eir & I915_ERROR_MEMORY_REFRESH) {
    2538           0 :                 pr_err("memory refresh error:\n");
    2539           0 :                 for_each_pipe(dev_priv, pipe)
    2540           0 :                         pr_err("pipe %c stat: 0x%08x\n",
    2541             :                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
    2542             :                 /* pipestat has already been acked */
    2543             :         }
    2544           0 :         if (eir & I915_ERROR_INSTRUCTION) {
    2545           0 :                 pr_err("instruction error\n");
    2546           0 :                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
    2547           0 :                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
    2548           0 :                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
    2549           0 :                 if (INTEL_INFO(dev)->gen < 4) {
    2550           0 :                         u32 ipeir = I915_READ(IPEIR);
    2551             : 
    2552           0 :                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
    2553           0 :                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
    2554           0 :                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
    2555           0 :                         I915_WRITE(IPEIR, ipeir);
    2556           0 :                         POSTING_READ(IPEIR);
    2557           0 :                 } else {
    2558           0 :                         u32 ipeir = I915_READ(IPEIR_I965);
    2559             : 
    2560           0 :                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
    2561           0 :                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
    2562           0 :                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
    2563           0 :                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
    2564           0 :                         I915_WRITE(IPEIR_I965, ipeir);
    2565           0 :                         POSTING_READ(IPEIR_I965);
    2566             :                 }
    2567             :         }
    2568             : 
    2569           0 :         I915_WRITE(EIR, eir);
    2570           0 :         POSTING_READ(EIR);
    2571           0 :         eir = I915_READ(EIR);
    2572           0 :         if (eir) {
    2573             :                 /*
    2574             :                  * some errors might have become stuck,
    2575             :                  * mask them.
    2576             :                  */
    2577           0 :                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
    2578           0 :                 I915_WRITE(EMR, I915_READ(EMR) | eir);
    2579           0 :                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
    2580           0 :         }
    2581           0 : }
    2582             : 
    2583             : /**
    2584             :  * i915_handle_error - handle a gpu error
    2585             :  * @dev: drm device
    2586             :  *
    2587             :  * Do some basic checking of register state at error time and
    2588             :  * dump it to the syslog.  Also call i915_capture_error_state() to make
    2589             :  * sure we get a record and make it available in debugfs.  Fire a uevent
    2590             :  * so userspace knows something bad happened (should trigger collection
    2591             :  * of a ring dump etc.).
    2592             :  */
    2593           0 : void i915_handle_error(struct drm_device *dev, bool wedged,
    2594             :                        const char *fmt, ...)
    2595             : {
    2596           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2597           0 :         va_list args;
    2598           0 :         char error_msg[80];
    2599             : 
    2600           0 :         va_start(args, fmt);
    2601           0 :         vsnprintf(error_msg, sizeof(error_msg), fmt, args);
    2602           0 :         va_end(args);
    2603             : 
    2604           0 :         i915_capture_error_state(dev, wedged, error_msg);
    2605           0 :         i915_report_and_clear_eir(dev);
    2606             : 
    2607           0 :         if (wedged) {
    2608           0 :                 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
    2609             :                                 &dev_priv->gpu_error.reset_counter);
    2610             : 
    2611             :                 /*
    2612             :                  * Wakeup waiting processes so that the reset function
    2613             :                  * i915_reset_and_wakeup doesn't deadlock trying to grab
    2614             :                  * various locks. By bumping the reset counter first, the woken
    2615             :                  * processes will see a reset in progress and back off,
    2616             :                  * releasing their locks and then wait for the reset completion.
    2617             :                  * We must do this for _all_ gpu waiters that might hold locks
    2618             :                  * that the reset work needs to acquire.
    2619             :                  *
    2620             :                  * Note: The wake_up serves as the required memory barrier to
    2621             :                  * ensure that the waiters see the updated value of the reset
    2622             :                  * counter atomic_t.
    2623             :                  */
    2624           0 :                 i915_error_wake_up(dev_priv, false);
    2625           0 :         }
    2626             : 
    2627           0 :         i915_reset_and_wakeup(dev);
    2628           0 : }
    2629             : 
    2630             : /* Called from drm generic code, passed 'crtc' which
    2631             :  * we use as a pipe index
    2632             :  */
    2633           0 : static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
    2634             : {
    2635           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2636             :         unsigned long irqflags;
    2637             : 
    2638           0 :         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    2639           0 :         if (INTEL_INFO(dev)->gen >= 4)
    2640           0 :                 i915_enable_pipestat(dev_priv, pipe,
    2641             :                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
    2642             :         else
    2643           0 :                 i915_enable_pipestat(dev_priv, pipe,
    2644             :                                      PIPE_VBLANK_INTERRUPT_STATUS);
    2645           0 :         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    2646             : 
    2647           0 :         return 0;
    2648             : }
    2649             : 
    2650           0 : static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
    2651             : {
    2652           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2653             :         unsigned long irqflags;
    2654           0 :         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
    2655           0 :                                                      DE_PIPE_VBLANK(pipe);
    2656             : 
    2657           0 :         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    2658           0 :         ironlake_enable_display_irq(dev_priv, bit);
    2659           0 :         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    2660             : 
    2661           0 :         return 0;
    2662             : }
    2663             : 
    2664           0 : static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
    2665             : {
    2666           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2667             :         unsigned long irqflags;
    2668             : 
    2669           0 :         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    2670           0 :         i915_enable_pipestat(dev_priv, pipe,
    2671             :                              PIPE_START_VBLANK_INTERRUPT_STATUS);
    2672           0 :         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    2673             : 
    2674           0 :         return 0;
    2675             : }
    2676             : 
    2677           0 : static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
    2678             : {
    2679           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2680             :         unsigned long irqflags;
    2681             : 
    2682           0 :         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    2683           0 :         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
    2684           0 :         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
    2685           0 :         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
    2686           0 :         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    2687           0 :         return 0;
    2688             : }
    2689             : 
    2690             : /* Called from drm generic code, passed 'crtc' which
    2691             :  * we use as a pipe index
    2692             :  */
    2693           0 : static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
    2694             : {
    2695           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    2696             :         unsigned long irqflags;
    2697             : 
    2698           0 :         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    2699           0 :         i915_disable_pipestat(dev_priv, pipe,
    2700             :                               PIPE_VBLANK_INTERRUPT_STATUS |
    2701             :                               PIPE_START_VBLANK_INTERRUPT_STATUS);
    2702           0 :         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    2703           0 : }
    2704             : 
/*
 * Disable the vblank interrupt for @pipe on Ironlake-class display
 * hardware. Gen7 (IVB) encodes the per-pipe vblank bit differently from
 * gen5/6, hence the runtime bit selection.
 */
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
    2716             : 
/*
 * Disable the vblank interrupt for @pipe on VLV/CHV display hardware.
 * Unlike the legacy i915 path, only the "start of vblank" pipestat
 * source is used here.
 */
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
    2727             : 
/*
 * Disable the vblank interrupt for @pipe on gen8+ display hardware:
 * the exact mirror of gen8_enable_vblank() — set the mask bit in the
 * cached copy and write it out to the per-pipe IMR register.
 */
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
        I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
        /* Flush the mask write before releasing the lock. */
        POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
    2739             : 
    2740             : static bool
    2741           0 : ring_idle(struct intel_engine_cs *ring, u32 seqno)
    2742             : {
    2743           0 :         return (list_empty(&ring->request_list) ||
    2744           0 :                 i915_seqno_passed(seqno, ring->last_submitted_seqno));
    2745             : }
    2746             : 
    2747             : static bool
    2748           0 : ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
    2749             : {
    2750           0 :         if (INTEL_INFO(dev)->gen >= 8) {
    2751           0 :                 return (ipehr >> 23) == 0x1c;
    2752             :         } else {
    2753           0 :                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
    2754           0 :                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
    2755             :                                  MI_SEMAPHORE_REGISTER);
    2756             :         }
    2757           0 : }
    2758             : 
/*
 * Map a semaphore wait back to the ring expected to signal it.
 *
 * On gen8+ the wait is identified by the 64-bit @offset of the semaphore,
 * matched against each candidate's signal_ggtt slot for the waiting ring.
 * On earlier gens the target is encoded in the MI_SEMAPHORE_SYNC bits of
 * @ipehr and matched against each candidate's mbox wait encoding.
 *
 * Returns the signalling ring, or NULL (with an error logged) when no
 * candidate matches.
 */
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;

        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
                for_each_ring(signaller, dev_priv, i) {
                        /* A ring never waits on itself. */
                        if (ring == signaller)
                                continue;

                        if (offset == signaller->semaphore.signal_ggtt[ring->id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

                for_each_ring(signaller, dev_priv, i) {
                        if(ring == signaller)
                                continue;

                        if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
                                return signaller;
                }
        }

        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
                  ring->id, ipehr, offset);

        return NULL;
}
    2791             : 
    2792             : static struct intel_engine_cs *
    2793           0 : semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
    2794             : {
    2795           0 :         struct drm_i915_private *dev_priv = ring->dev->dev_private;
    2796             :         u32 cmd, ipehr, head;
    2797             :         u64 offset = 0;
    2798             :         int i, backwards;
    2799             : 
    2800             :         /*
    2801             :          * This function does not support execlist mode - any attempt to
    2802             :          * proceed further into this function will result in a kernel panic
    2803             :          * when dereferencing ring->buffer, which is not set up in execlist
    2804             :          * mode.
    2805             :          *
    2806             :          * The correct way of doing it would be to derive the currently
    2807             :          * executing ring buffer from the current context, which is derived
    2808             :          * from the currently running request. Unfortunately, to get the
    2809             :          * current request we would have to grab the struct_mutex before doing
    2810             :          * anything else, which would be ill-advised since some other thread
    2811             :          * might have grabbed it already and managed to hang itself, causing
    2812             :          * the hang checker to deadlock.
    2813             :          *
    2814             :          * Therefore, this function does not support execlist mode in its
    2815             :          * current form. Just return NULL and move on.
    2816             :          */
    2817           0 :         if (ring->buffer == NULL)
    2818           0 :                 return NULL;
    2819             : 
    2820           0 :         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
    2821           0 :         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
    2822           0 :                 return NULL;
    2823             : 
    2824             :         /*
    2825             :          * HEAD is likely pointing to the dword after the actual command,
    2826             :          * so scan backwards until we find the MBOX. But limit it to just 3
    2827             :          * or 4 dwords depending on the semaphore wait command size.
    2828             :          * Note that we don't care about ACTHD here since that might
    2829             :          * point at at batch, and semaphores are always emitted into the
    2830             :          * ringbuffer itself.
    2831             :          */
    2832           0 :         head = I915_READ_HEAD(ring) & HEAD_ADDR;
    2833           0 :         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
    2834             : 
    2835           0 :         for (i = backwards; i; --i) {
    2836             :                 /*
    2837             :                  * Be paranoid and presume the hw has gone off into the wild -
    2838             :                  * our ring is smaller than what the hardware (and hence
    2839             :                  * HEAD_ADDR) allows. Also handles wrap-around.
    2840             :                  */
    2841           0 :                 head &= ring->buffer->size - 1;
    2842             : 
    2843             :                 /* This here seems to blow up */
    2844           0 :                 cmd = ioread32(ring->buffer->virtual_start + head);
    2845           0 :                 if (cmd == ipehr)
    2846             :                         break;
    2847             : 
    2848           0 :                 head -= 4;
    2849             :         }
    2850             : 
    2851           0 :         if (!i)
    2852           0 :                 return NULL;
    2853             : 
    2854           0 :         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
    2855           0 :         if (INTEL_INFO(ring->dev)->gen >= 8) {
    2856           0 :                 offset = ioread32(ring->buffer->virtual_start + head + 12);
    2857             :                 offset <<= 32;
    2858           0 :                 offset = ioread32(ring->buffer->virtual_start + head + 8);
    2859           0 :         }
    2860           0 :         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
    2861           0 : }
    2862             : 
/*
 * Check whether the semaphore this ring is waiting on has been (or can
 * be) signalled.
 *
 * Returns 1 when the signaller has already passed the awaited seqno,
 * 0 when the signaller is still making progress, and -1 when no
 * signaller can be found or a wait cycle (deadlock) is detected.
 *
 * Recurses through chained waiters; the per-ring deadlock counter
 * (cleared each hangcheck pass by semaphore_clear_deadlocks()) bounds
 * the recursion depth to I915_NUM_RINGS.
 */
static int semaphore_passed(struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        u32 seqno;

        ring->hangcheck.deadlock++;

        signaller = semaphore_waits_for(ring, &seqno);
        if (signaller == NULL)
                return -1;

        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;

        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;

        /* cursory check for an unkickable deadlock */
        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
            semaphore_passed(signaller) < 0)
                return -1;

        return 0;
}
    2889             : 
    2890           0 : static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
    2891             : {
    2892             :         struct intel_engine_cs *ring;
    2893             :         int i;
    2894             : 
    2895           0 :         for_each_ring(ring, dev_priv, i)
    2896           0 :                 ring->hangcheck.deadlock = 0;
    2897           0 : }
    2898             : 
/*
 * Classify why a ring whose seqno is not advancing appears stuck, based
 * on its active head (ACTHD) and ring control register, and attempt to
 * kick recoverable waits by rewriting RING_CTL.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        /* ACTHD moved since last check: the ring is executing. Only treat
         * monotonic forward progress as ACTIVE; revisiting old addresses
         * is flagged as ACTIVE_LOOP (possible spinning batch). */
        if (acthd != ring->hangcheck.acthd) {
                if (acthd > ring->hangcheck.max_acthd) {
                        ring->hangcheck.max_acthd = acthd;
                        return HANGCHECK_ACTIVE;
                }

                return HANGCHECK_ACTIVE_LOOP;
        }

        if (IS_GEN2(dev))
                return HANGCHECK_HUNG;

        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
        tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev, false,
                                  "Kicking stuck wait on %s",
                                  ring->name);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }

        /* Gen6+: a ring parked on a semaphore may be a legitimate waiter
         * (WAIT), already satisfied (kick it), or deadlocked (HUNG). */
        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(ring)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
                        i915_handle_error(dev, false,
                                          "Kicking stuck semaphore on %s",
                                          ring->name);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
                        return HANGCHECK_WAIT;
                }
        }

        return HANGCHECK_HUNG;
}
    2949             : 
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there are no progress, hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct intel_engine_cs *ring;
        int i;
        int busy_count = 0, rings_hung = 0;
        bool stuck[I915_NUM_RINGS] = { 0 };
/* Score increments: BUSY for normal progress stalls, KICK when we had to
 * poke the ring, HUNG when it is unrecoverable this pass. */
#define BUSY 1
#define KICK 5
#define HUNG 20

        if (!i915.enable_hangcheck)
                return;

        for_each_ring(ring, dev_priv, i) {
                u64 acthd;
                u32 seqno;
                bool busy = true;

                semaphore_clear_deadlocks(dev_priv);

                seqno = ring->get_seqno(ring, false);
                acthd = intel_ring_get_active_head(ring);

                /* Same seqno as last pass: either truly idle, or stuck on
                 * the same request. */
                if (ring->hangcheck.seqno == seqno) {
                        if (ring_idle(ring, seqno)) {
                                ring->hangcheck.action = HANGCHECK_IDLE;

                                if (waitqueue_active(&ring->irq_queue)) {
                                        /* Issue a wake-up to catch stuck h/w. */
                                        if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
                                                if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
                                                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                                                  ring->name);
                                                else
                                                        DRM_INFO("Fake missed irq on %s\n",
                                                                 ring->name);
                                                wake_up_all(&ring->irq_queue);
                                        }
                                        /* Safeguard against driver failure */
                                        ring->hangcheck.score += BUSY;
                                } else
                                        busy = false;
                        } else {
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
                                 * the same request, so that no single request
                                 * can run indefinitely (such as a chain of
                                 * batches). The only time we do not increment
                                 * the hangcheck score on this ring, if this
                                 * ring is in a legitimate wait for another
                                 * ring. In that case the waiting ring is a
                                 * victim and we want to be sure we catch the
                                 * right culprit. Then every time we do kick
                                 * the ring, add a small increment to the
                                 * score so that we can catch a batch that is
                                 * being repeatedly kicked and so responsible
                                 * for stalling the machine.
                                 */
                                ring->hangcheck.action = ring_stuck(ring,
                                                                    acthd);

                                switch (ring->hangcheck.action) {
                                case HANGCHECK_IDLE:
                                case HANGCHECK_WAIT:
                                case HANGCHECK_ACTIVE:
                                        break;
                                case HANGCHECK_ACTIVE_LOOP:
                                        ring->hangcheck.score += BUSY;
                                        break;
                                case HANGCHECK_KICK:
                                        ring->hangcheck.score += KICK;
                                        break;
                                case HANGCHECK_HUNG:
                                        ring->hangcheck.score += HUNG;
                                        stuck[i] = true;
                                        break;
                                }
                        }
                } else {
                        ring->hangcheck.action = HANGCHECK_ACTIVE;

                        /* Gradually reduce the count so that we catch DoS
                         * attempts across multiple batches.
                         */
                        if (ring->hangcheck.score > 0)
                                ring->hangcheck.score--;

                        ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
                }

                /* Snapshot this pass's seqno/acthd for comparison next time. */
                ring->hangcheck.seqno = seqno;
                ring->hangcheck.acthd = acthd;
                busy_count += busy;
        }

        for_each_ring(ring, dev_priv, i) {
                if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                        DRM_INFO("%s on %s\n",
                                 stuck[i] ? "stuck" : "no progress",
                                 ring->name);
                        rings_hung++;
                }
        }

        if (rings_hung)
                return i915_handle_error(dev, true, "Ring hung");

        if (busy_count)
                /* Reset timer case chip hangs without another request
                 * being added */
                i915_queue_hangcheck(dev);
}
    3074             : 
/*
 * (Re)arm the delayed hangcheck work, unless hangchecking has been
 * disabled via the i915.enable_hangcheck module parameter.
 */
void i915_queue_hangcheck(struct drm_device *dev)
{
        struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

        if (!i915.enable_hangcheck)
                return;

        /* Don't continually defer the hangcheck so that it is always run at
         * least once after work has been scheduled on any ring. Otherwise,
         * we will ignore a hung ring if a second ring is kept busy.
         */

        queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
                           round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
    3090             : 
/*
 * Reset the south display engine (PCH/SDE) interrupt registers. A no-op
 * on PCH_NOP platforms, which have no PCH interrupt block. On CPT/LPT
 * the south error interrupt register is also cleared by writing back
 * all-ones.
 */
static void ibx_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_PCH_NOP(dev))
                return;

        GEN5_IRQ_RESET(SDE);

        if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
                I915_WRITE(SERR_INT, 0xffffffff);
}
    3103             : 
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_PCH_NOP(dev))
                return;

        /* The reset path should have left SDEIER zeroed. */
        WARN_ON(I915_READ(SDEIER) != 0);
        I915_WRITE(SDEIER, 0xffffffff);
        POSTING_READ(SDEIER);
}
    3123             : 
/*
 * Reset the GT interrupt registers; on gen6+ the PM (power management)
 * interrupt block is reset as well.
 */
static void gen5_gt_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        GEN5_IRQ_RESET(GT);
        if (INTEL_INFO(dev)->gen >= 6)
                GEN5_IRQ_RESET(GEN6_PM);
}
    3132             : 
/* drm_dma.h hooks
*/
/*
 * Full interrupt reset for Ironlake-class hardware: mask everything in
 * HWSTAM, reset the display-engine and GT interrupt blocks, clear the
 * gen7 error interrupt register, and reset the PCH interrupts.
 */
static void ironlake_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(HWSTAM, 0xffffffff);

        GEN5_IRQ_RESET(DE);
        if (IS_GEN7(dev))
                I915_WRITE(GEN7_ERR_INT, 0xffffffff);

        gen5_gt_irq_reset(dev);

        ibx_irq_reset(dev);
}
    3149             : 
/*
 * Reset the VLV/CHV display interrupt state: disable all hotplug
 * detection, ack any pending hotplug status (write-back clears it),
 * clear every pipe's PIPESTAT, and reset the VLV interrupt registers.
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        GEN5_IRQ_RESET(VLV_);
}
    3162             : 
/*
 * Pre-install interrupt quiescing for Valleyview: zero the VLV and
 * per-ring interrupt masks, reset the GT interrupt block, ack pending
 * DPINVGTT status, and reset the display interrupts.
 */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

        gen5_gt_irq_reset(dev);

        /* Writing the status mask back acks pending invalid-GTT-access
         * events. */
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

        vlv_display_irq_reset(dev_priv);
}
    3179             : 
/* Reset all four gen8 GT interrupt register banks. */
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
        GEN8_IRQ_RESET_NDX(GT, 0);
        GEN8_IRQ_RESET_NDX(GT, 1);
        GEN8_IRQ_RESET_NDX(GT, 2);
        GEN8_IRQ_RESET_NDX(GT, 3);
}
    3187             : 
/*
 * Full gen8 interrupt reset: disable the master interrupt first, then
 * reset GT, per-pipe DE (only for pipes whose power well is up — the
 * registers of powered-down pipes are not accessible), DE port/misc,
 * PCU, and finally the PCH interrupts where present.
 */
static void gen8_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(dev_priv);

        for_each_pipe(dev_priv, pipe)
                if (intel_display_power_is_enabled(dev_priv,
                                                   POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

        GEN5_IRQ_RESET(GEN8_DE_PORT_);
        GEN5_IRQ_RESET(GEN8_DE_MISC_);
        GEN5_IRQ_RESET(GEN8_PCU_);

        if (HAS_PCH_SPLIT(dev))
                ibx_irq_reset(dev);
}
    3210             : 
/*
 * Re-initialize the per-pipe DE interrupt registers for every pipe in
 * @pipe_mask after its display power well has been enabled (the
 * registers lose their state while the well is down). The IER is set
 * from the inverted cached mask plus vblank and FIFO-underrun bits.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
                                     unsigned int pipe_mask)
{
        uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

        spin_lock_irq(&dev_priv->irq_lock);
        if (pipe_mask & 1 << PIPE_A)
                GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
                                  dev_priv->de_irq_mask[PIPE_A],
                                  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
        if (pipe_mask & 1 << PIPE_B)
                GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
                                  dev_priv->de_irq_mask[PIPE_B],
                                  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
        if (pipe_mask & 1 << PIPE_C)
                GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
                                  dev_priv->de_irq_mask[PIPE_C],
                                  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
        spin_unlock_irq(&dev_priv->irq_lock);
}
    3231             : 
/*
 * Pre-install interrupt quiescing for Cherryview: disable the gen8-style
 * master interrupt, reset GT and PCU interrupt blocks, ack pending
 * DPINVGTT status (CHV-specific mask), and reset the VLV-style display
 * interrupts.
 */
static void cherryview_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(dev_priv);

        GEN5_IRQ_RESET(GEN8_PCU_);

        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

        vlv_display_irq_reset(dev_priv);
}
    3247             : 
    3248           0 : static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
    3249             :                                   const u32 hpd[HPD_NUM_PINS])
    3250             : {
    3251           0 :         struct drm_i915_private *dev_priv = to_i915(dev);
    3252             :         struct intel_encoder *encoder;
    3253             :         u32 enabled_irqs = 0;
    3254             : 
    3255           0 :         for_each_intel_encoder(dev, encoder)
    3256           0 :                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
    3257           0 :                         enabled_irqs |= hpd[encoder->hpd_pin];
    3258             : 
    3259           0 :         return enabled_irqs;
    3260             : }
    3261             : 
/*
 * Program south-display (PCH) hotplug detection for IbexPeak/CougarPoint
 * class PCHs: unmask the per-port SDE hotplug interrupts that have an
 * enabled encoder, then enable detection in PCH_PORT_HOTPLUG.
 */
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	/* IBX and CPT+ use different SDE bit layouts and pin tables. */
	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
    3295             : 
/*
 * SunrisePoint PCH hotplug setup: unmask the SPT SDE hotplug bits for
 * enabled encoders, then enable detection on ports A-D in
 * PCH_PORT_HOTPLUG and port E in PCH_PORT_HOTPLUG2.
 */
static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in the second hotplug register on SPT. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
    3316             : 
/*
 * North-display (CPU side) port A hotplug setup for ILK through BDW.
 * The DE hotplug bit and update helper differ per generation; after the
 * CPU side is armed, the PCH (south) side is configured via
 * ibx_hpd_irq_setup().
 */
static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 8) {
		/* BDW: port A hotplug lives in the DE port interrupt bank. */
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		/* IVB/HSW bit layout. */
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		/* ILK/SNB bit layout. */
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}
    3351             : 
/*
 * Broxton hotplug setup: unmask the DE-port hotplug bits for enabled
 * encoders, then enable detection on ports A-C in PCH_PORT_HOTPLUG
 * (BXT has no PCH; the register lives on the SoC).
 */
static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
    3367             : 
    3368           0 : static void ibx_irq_postinstall(struct drm_device *dev)
    3369             : {
    3370           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3371             :         u32 mask;
    3372             : 
    3373           0 :         if (HAS_PCH_NOP(dev))
    3374           0 :                 return;
    3375             : 
    3376           0 :         if (HAS_PCH_IBX(dev))
    3377           0 :                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
    3378             :         else
    3379             :                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
    3380             : 
    3381           0 :         gen5_assert_iir_is_zero(dev_priv, SDEIIR);
    3382           0 :         I915_WRITE(SDEIMR, ~mask);
    3383           0 : }
    3384             : 
/*
 * Enable the GT (render/media) interrupts for gen5+ up-to-gen7-style
 * hardware: user interrupts for each engine, L3 parity where present,
 * and (gen6+) the PM/VEBOX bank with RPS left for on-demand enabling.
 */
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	/* Default: everything masked... */
	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		/* ILK uses different BSD/notify bits than gen6+. */
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		/* PM bank starts fully masked; RPS code unmasks later. */
		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
    3421             : 
/*
 * Full IRQ postinstall for ILK..HSW: build the display-engine mask
 * (gen7+ uses the *_IVB bit layout), init the DE/GT/PCH banks in order,
 * and on mobile ILK additionally enable PCU events.  Returns 0.
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		/* display_mask: bits unmasked in IMR; extra_mask: bits only
		 * enabled in IER on top of those. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	/* PCH interrupts must be pre-initialized before the DE bank. */
	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
    3471             : 
/*
 * Arm the VLV/CHV display interrupts: clear all pipe status, enable the
 * per-pipe status bits we care about, then unmask the pipe-event IIR
 * bits and program IER/IMR.  Caller holds irq_lock (see the callers in
 * this file).  Mirrored exactly by valleyview_display_irqs_uninstall().
 */
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	/* First ack every outstanding pipe status bit. */
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is routed through pipe A only. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	/* IIR is written twice on purpose; NOTE(review): presumably to
	 * flush double-buffered IIR bits — the uninstall path does the
	 * same.  Do not "simplify" to a single write. */
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}
    3505             : 
/*
 * Tear down the VLV/CHV display interrupts in the reverse order of
 * valleyview_display_irqs_install(): mask + ack the pipe-event IIR bits
 * first, then disable and clear the per-pipe status bits.
 */
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	/* Mask first, then ack; IIR is deliberately written twice,
	 * matching the install path. */
	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* Undo the pipe A GMBUS routing and per-pipe enables. */
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
    3539             : 
    3540           0 : void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
    3541             : {
    3542           0 :         assert_spin_locked(&dev_priv->irq_lock);
    3543             : 
    3544           0 :         if (dev_priv->display_irqs_enabled)
    3545             :                 return;
    3546             : 
    3547           0 :         dev_priv->display_irqs_enabled = true;
    3548             : 
    3549           0 :         if (intel_irqs_enabled(dev_priv))
    3550           0 :                 valleyview_display_irqs_install(dev_priv);
    3551           0 : }
    3552             : 
    3553           0 : void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
    3554             : {
    3555           0 :         assert_spin_locked(&dev_priv->irq_lock);
    3556             : 
    3557           0 :         if (!dev_priv->display_irqs_enabled)
    3558             :                 return;
    3559             : 
    3560           0 :         dev_priv->display_irqs_enabled = false;
    3561             : 
    3562           0 :         if (intel_irqs_enabled(dev_priv))
    3563           0 :                 valleyview_display_irqs_uninstall(dev_priv);
    3564           0 : }
    3565             : 
/*
 * Baseline VLV display postinstall: everything masked, hotplug-enable
 * cleared, pending IIR bits acked, then the display interrupts proper
 * installed if they were requested while IRQs were down.
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* Start with all display interrupts masked. */
	dev_priv->irq_mask = ~0;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	/* IIR written twice on purpose (see the install/uninstall
	 * helpers) to clear all pending bits. */
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
    3586             : 
/*
 * Full VLV IRQ postinstall: display bank, then GT bank, then master
 * enable last so nothing fires half-configured.  Returns 0.
 */
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
    3605             : 
/*
 * Enable the gen8 GT interrupts.  The four array entries map onto the
 * four GT IIR/IMR/IER banks: [0] RCS+BCS, [1] VCS1+VCS2, [2] PM/RPS
 * (left fully masked here), [3] VECS — as encoded by the per-engine
 * *_IRQ_SHIFT macros below.
 */
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
    3634             : 
/*
 * Enable the gen8+ display-engine interrupts: per-pipe banks (only for
 * pipes whose power domain is currently up — powered-down pipes are
 * re-armed later via gen8_irq_power_well_post_enable()), the DE port
 * bank (AUX + hotplug), and the DE misc bank (GSE).
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		/* gen9 renamed the flip-done/fault bits and gained AUX B-D. */
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	/* VBLANK/underrun only enabled in IER, not unmasked in IMR. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);

	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~GEN8_DE_MISC_GSE, GEN8_DE_MISC_GSE);
}
    3679             : 
/*
 * Full gen8 (BDW+) IRQ postinstall: PCH pre-init, GT and DE banks, PCH
 * post-init, then the master enable last.  Returns 0.
 */
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	/* Master enable only once everything below is configured. */
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
    3698             : 
/*
 * Cherryview IRQ postinstall: VLV-style display bank plus the gen8 GT
 * banks, master enable last.  Returns 0.
 */
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
    3712             : 
    3713           0 : static void gen8_irq_uninstall(struct drm_device *dev)
    3714             : {
    3715           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3716             : 
    3717           0 :         if (!dev_priv)
    3718           0 :                 return;
    3719             : 
    3720           0 :         gen8_irq_reset(dev);
    3721           0 : }
    3722             : 
/*
 * Uninstall the VLV display interrupts (if armed), reset the display
 * block, and leave the cached mask fully masked.
 */
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	/* Everything masked from here on. */
	dev_priv->irq_mask = ~0;
}
    3736             : 
/*
 * VLV uninstall hook: master disable, GT reset, HWSTAM masked, then
 * the display side torn down.
 */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}
    3752             : 
/*
 * CHV uninstall hook: master disable (flushed), gen8 GT reset, PCU bank
 * reset, then the VLV-style display teardown.
 */
static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}
    3769             : 
    3770           0 : static void ironlake_irq_uninstall(struct drm_device *dev)
    3771             : {
    3772           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3773             : 
    3774           0 :         if (!dev_priv)
    3775           0 :                 return;
    3776             : 
    3777           0 :         ironlake_irq_reset(dev);
    3778           0 : }
    3779             : 
/*
 * Gen2 preinstall: clear all pipe status, mask everything in the 16-bit
 * IMR, disable IER, and flush.
 */
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
    3791             : 
/*
 * Gen2 postinstall: unmask page-table/refresh errors in EMR, the pipe
 * event and flip-pending interrupts in the 16-bit IMR, enable pipe
 * events + user interrupts in IER, and turn on CRC-done pipe status for
 * both pipes.  Returns 0.
 */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
    3822             : 
    3823             : /*
    3824             :  * Returns true when a page flip has completed.
    3825             :  */
    3826           0 : static bool i8xx_handle_vblank(struct drm_device *dev,
    3827             :                                int plane, int pipe, u32 iir)
    3828             : {
    3829           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3830           0 :         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
    3831             : 
    3832           0 :         if (!intel_pipe_handle_vblank(dev, pipe))
    3833           0 :                 return false;
    3834             : 
    3835           0 :         if ((iir & flip_pending) == 0)
    3836             :                 goto check_page_flip;
    3837             : 
    3838             :         /* We detect FlipDone by looking for the change in PendingFlip from '1'
    3839             :          * to '0' on the following vblank, i.e. IIR has the Pendingflip
    3840             :          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
    3841             :          * the flip is completed (no longer pending). Since this doesn't raise
    3842             :          * an interrupt per se, we watch for the change at vblank.
    3843             :          */
    3844           0 :         if (I915_READ16(ISR) & flip_pending)
    3845             :                 goto check_page_flip;
    3846             : 
    3847           0 :         intel_prepare_page_flip(dev, plane);
    3848           0 :         intel_finish_page_flip(dev, pipe);
    3849           0 :         return true;
    3850             : 
    3851             : check_page_flip:
    3852           0 :         intel_check_page_flip(dev, pipe);
    3853           0 :         return false;
    3854           0 : }
    3855             : 
    3856           0 : static irqreturn_t i8xx_irq_handler(int irq, void *arg)
    3857             : {
    3858           0 :         struct drm_device *dev = arg;
    3859           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3860             :         u16 iir, new_iir;
    3861           0 :         u32 pipe_stats[2];
    3862             :         int pipe;
    3863             :         u16 flip_mask =
    3864             :                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    3865             :                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
    3866             : 
    3867           0 :         if (!intel_irqs_enabled(dev_priv))
    3868           0 :                 return IRQ_NONE;
    3869             : 
    3870           0 :         iir = I915_READ16(IIR);
    3871           0 :         if (iir == 0)
    3872           0 :                 return IRQ_NONE;
    3873             : 
    3874           0 :         while (iir & ~flip_mask) {
    3875             :                 /* Can't rely on pipestat interrupt bit in iir as it might
    3876             :                  * have been cleared after the pipestat interrupt was received.
    3877             :                  * It doesn't set the bit in iir again, but it still produces
    3878             :                  * interrupts (for non-MSI).
    3879             :                  */
    3880           0 :                 spin_lock(&dev_priv->irq_lock);
    3881           0 :                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
    3882             :                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
    3883             : 
    3884           0 :                 for_each_pipe(dev_priv, pipe) {
    3885           0 :                         int reg = PIPESTAT(pipe);
    3886           0 :                         pipe_stats[pipe] = I915_READ(reg);
    3887             : 
    3888             :                         /*
    3889             :                          * Clear the PIPE*STAT regs before the IIR
    3890             :                          */
    3891           0 :                         if (pipe_stats[pipe] & 0x8000ffff)
    3892           0 :                                 I915_WRITE(reg, pipe_stats[pipe]);
    3893             :                 }
    3894           0 :                 spin_unlock(&dev_priv->irq_lock);
    3895             : 
    3896           0 :                 I915_WRITE16(IIR, iir & ~flip_mask);
    3897           0 :                 new_iir = I915_READ16(IIR); /* Flush posted writes */
    3898             : 
    3899           0 :                 if (iir & I915_USER_INTERRUPT)
    3900           0 :                         notify_ring(&dev_priv->ring[RCS]);
    3901             : 
    3902           0 :                 for_each_pipe(dev_priv, pipe) {
    3903             :                         int plane = pipe;
    3904           0 :                         if (HAS_FBC(dev))
    3905           0 :                                 plane = !plane;
    3906             : 
    3907           0 :                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
    3908           0 :                             i8xx_handle_vblank(dev, plane, pipe, iir))
    3909           0 :                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
    3910             : 
    3911           0 :                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
    3912           0 :                                 i9xx_pipe_crc_irq_handler(dev, pipe);
    3913             : 
    3914           0 :                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
    3915           0 :                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
    3916             :                                                                     pipe);
    3917             :                 }
    3918             : 
    3919             :                 iir = new_iir;
    3920             :         }
    3921             : 
    3922           0 :         return IRQ_HANDLED;
    3923           0 : }
    3924             : 
    3925           0 : static void i8xx_irq_uninstall(struct drm_device * dev)
    3926             : {
    3927           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3928             :         int pipe;
    3929             : 
    3930           0 :         for_each_pipe(dev_priv, pipe) {
    3931             :                 /* Clear enable bits; then clear status bits */
    3932           0 :                 I915_WRITE(PIPESTAT(pipe), 0);
    3933           0 :                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
    3934             :         }
    3935           0 :         I915_WRITE16(IMR, 0xffff);
    3936           0 :         I915_WRITE16(IER, 0x0);
    3937           0 :         I915_WRITE16(IIR, I915_READ16(IIR));
    3938           0 : }
    3939             : 
    3940           0 : static void i915_irq_preinstall(struct drm_device * dev)
    3941             : {
    3942           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3943             :         int pipe;
    3944             : 
    3945           0 :         if (I915_HAS_HOTPLUG(dev)) {
    3946           0 :                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
    3947           0 :                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    3948           0 :         }
    3949             : 
    3950           0 :         I915_WRITE16(HWSTAM, 0xeffe);
    3951           0 :         for_each_pipe(dev_priv, pipe)
    3952           0 :                 I915_WRITE(PIPESTAT(pipe), 0);
    3953           0 :         I915_WRITE(IMR, 0xffffffff);
    3954           0 :         I915_WRITE(IER, 0x0);
    3955           0 :         POSTING_READ(IER);
    3956           0 : }
    3957             : 
    3958           0 : static int i915_irq_postinstall(struct drm_device *dev)
    3959             : {
    3960           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    3961             :         u32 enable_mask;
    3962             : 
    3963           0 :         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
    3964             : 
    3965             :         /* Unmask the interrupts that we always want on. */
    3966           0 :         dev_priv->irq_mask =
    3967             :                 ~(I915_ASLE_INTERRUPT |
    3968             :                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
    3969             :                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
    3970             :                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    3971             :                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
    3972             : 
    3973             :         enable_mask =
    3974             :                 I915_ASLE_INTERRUPT |
    3975             :                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
    3976             :                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
    3977             :                 I915_USER_INTERRUPT;
    3978             : 
    3979           0 :         if (I915_HAS_HOTPLUG(dev)) {
    3980           0 :                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
    3981           0 :                 POSTING_READ(PORT_HOTPLUG_EN);
    3982             : 
    3983             :                 /* Enable in IER... */
    3984             :                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
    3985             :                 /* and unmask in IMR */
    3986           0 :                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
    3987           0 :         }
    3988             : 
    3989           0 :         I915_WRITE(IMR, dev_priv->irq_mask);
    3990           0 :         I915_WRITE(IER, enable_mask);
    3991           0 :         POSTING_READ(IER);
    3992             : 
    3993           0 :         i915_enable_asle_pipestat(dev);
    3994             : 
    3995             :         /* Interrupt setup is already guaranteed to be single-threaded, this is
    3996             :          * just to make the assert_spin_locked check happy. */
    3997           0 :         spin_lock_irq(&dev_priv->irq_lock);
    3998           0 :         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
    3999           0 :         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
    4000           0 :         spin_unlock_irq(&dev_priv->irq_lock);
    4001             : 
    4002           0 :         return 0;
    4003             : }
    4004             : 
    4005             : /*
    4006             :  * Returns true when a page flip has completed.
    4007             :  */
    4008           0 : static bool i915_handle_vblank(struct drm_device *dev,
    4009             :                                int plane, int pipe, u32 iir)
    4010             : {
    4011           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4012           0 :         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
    4013             : 
    4014           0 :         if (!intel_pipe_handle_vblank(dev, pipe))
    4015           0 :                 return false;
    4016             : 
    4017           0 :         if ((iir & flip_pending) == 0)
    4018             :                 goto check_page_flip;
    4019             : 
    4020             :         /* We detect FlipDone by looking for the change in PendingFlip from '1'
    4021             :          * to '0' on the following vblank, i.e. IIR has the Pendingflip
    4022             :          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
    4023             :          * the flip is completed (no longer pending). Since this doesn't raise
    4024             :          * an interrupt per se, we watch for the change at vblank.
    4025             :          */
    4026           0 :         if (I915_READ(ISR) & flip_pending)
    4027             :                 goto check_page_flip;
    4028             : 
    4029           0 :         intel_prepare_page_flip(dev, plane);
    4030           0 :         intel_finish_page_flip(dev, pipe);
    4031           0 :         return true;
    4032             : 
    4033             : check_page_flip:
    4034           0 :         intel_check_page_flip(dev, pipe);
    4035           0 :         return false;
    4036           0 : }
    4037             : 
    4038           0 : static irqreturn_t i915_irq_handler(int irq, void *arg)
    4039             : {
    4040           0 :         struct drm_device *dev = arg;
    4041           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4042           0 :         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
    4043             :         u32 flip_mask =
    4044             :                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    4045             :                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
    4046             :         int pipe, ret = IRQ_NONE;
    4047             : 
    4048           0 :         if (!intel_irqs_enabled(dev_priv))
    4049           0 :                 return IRQ_NONE;
    4050             : 
    4051           0 :         iir = I915_READ(IIR);
    4052           0 :         do {
    4053           0 :                 bool irq_received = (iir & ~flip_mask) != 0;
    4054             :                 bool blc_event = false;
    4055             : 
    4056             :                 /* Can't rely on pipestat interrupt bit in iir as it might
    4057             :                  * have been cleared after the pipestat interrupt was received.
    4058             :                  * It doesn't set the bit in iir again, but it still produces
    4059             :                  * interrupts (for non-MSI).
    4060             :                  */
    4061           0 :                 spin_lock(&dev_priv->irq_lock);
    4062           0 :                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
    4063             :                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
    4064             : 
    4065           0 :                 for_each_pipe(dev_priv, pipe) {
    4066           0 :                         int reg = PIPESTAT(pipe);
    4067           0 :                         pipe_stats[pipe] = I915_READ(reg);
    4068             : 
    4069             :                         /* Clear the PIPE*STAT regs before the IIR */
    4070           0 :                         if (pipe_stats[pipe] & 0x8000ffff) {
    4071           0 :                                 I915_WRITE(reg, pipe_stats[pipe]);
    4072             :                                 irq_received = true;
    4073           0 :                         }
    4074             :                 }
    4075           0 :                 spin_unlock(&dev_priv->irq_lock);
    4076             : 
    4077           0 :                 if (!irq_received)
    4078           0 :                         break;
    4079             : 
    4080             :                 /* Consume port.  Then clear IIR or we'll miss events */
    4081           0 :                 if (I915_HAS_HOTPLUG(dev) &&
    4082           0 :                     iir & I915_DISPLAY_PORT_INTERRUPT)
    4083           0 :                         i9xx_hpd_irq_handler(dev);
    4084             : 
    4085           0 :                 I915_WRITE(IIR, iir & ~flip_mask);
    4086           0 :                 new_iir = I915_READ(IIR); /* Flush posted writes */
    4087             : 
    4088           0 :                 if (iir & I915_USER_INTERRUPT)
    4089           0 :                         notify_ring(&dev_priv->ring[RCS]);
    4090             : 
    4091           0 :                 for_each_pipe(dev_priv, pipe) {
    4092             :                         int plane = pipe;
    4093           0 :                         if (HAS_FBC(dev))
    4094           0 :                                 plane = !plane;
    4095             : 
    4096           0 :                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
    4097           0 :                             i915_handle_vblank(dev, plane, pipe, iir))
    4098           0 :                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
    4099             : 
    4100           0 :                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
    4101           0 :                                 blc_event = true;
    4102             : 
    4103           0 :                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
    4104           0 :                                 i9xx_pipe_crc_irq_handler(dev, pipe);
    4105             : 
    4106           0 :                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
    4107           0 :                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
    4108             :                                                                     pipe);
    4109             :                 }
    4110             : 
    4111           0 :                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
    4112           0 :                         intel_opregion_asle_intr(dev);
    4113             : 
    4114             :                 /* With MSI, interrupts are only generated when iir
    4115             :                  * transitions from zero to nonzero.  If another bit got
    4116             :                  * set while we were handling the existing iir bits, then
    4117             :                  * we would never get another interrupt.
    4118             :                  *
    4119             :                  * This is fine on non-MSI as well, as if we hit this path
    4120             :                  * we avoid exiting the interrupt handler only to generate
    4121             :                  * another one.
    4122             :                  *
    4123             :                  * Note that for MSI this could cause a stray interrupt report
    4124             :                  * if an interrupt landed in the time between writing IIR and
    4125             :                  * the posting read.  This should be rare enough to never
    4126             :                  * trigger the 99% of 100,000 interrupts test for disabling
    4127             :                  * stray interrupts.
    4128             :                  */
    4129             :                 ret = IRQ_HANDLED;
    4130             :                 iir = new_iir;
    4131           0 :         } while (iir & ~flip_mask);
    4132             : 
    4133           0 :         return ret;
    4134           0 : }
    4135             : 
    4136           0 : static void i915_irq_uninstall(struct drm_device * dev)
    4137             : {
    4138           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4139             :         int pipe;
    4140             : 
    4141           0 :         if (I915_HAS_HOTPLUG(dev)) {
    4142           0 :                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
    4143           0 :                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    4144           0 :         }
    4145             : 
    4146           0 :         I915_WRITE16(HWSTAM, 0xffff);
    4147           0 :         for_each_pipe(dev_priv, pipe) {
    4148             :                 /* Clear enable bits; then clear status bits */
    4149           0 :                 I915_WRITE(PIPESTAT(pipe), 0);
    4150           0 :                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
    4151             :         }
    4152           0 :         I915_WRITE(IMR, 0xffffffff);
    4153           0 :         I915_WRITE(IER, 0x0);
    4154             : 
    4155           0 :         I915_WRITE(IIR, I915_READ(IIR));
    4156           0 : }
    4157             : 
    4158           0 : static void i965_irq_preinstall(struct drm_device * dev)
    4159             : {
    4160           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4161             :         int pipe;
    4162             : 
    4163           0 :         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
    4164           0 :         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    4165             : 
    4166           0 :         I915_WRITE(HWSTAM, 0xeffe);
    4167           0 :         for_each_pipe(dev_priv, pipe)
    4168           0 :                 I915_WRITE(PIPESTAT(pipe), 0);
    4169           0 :         I915_WRITE(IMR, 0xffffffff);
    4170           0 :         I915_WRITE(IER, 0x0);
    4171           0 :         POSTING_READ(IER);
    4172           0 : }
    4173             : 
    4174           0 : static int i965_irq_postinstall(struct drm_device *dev)
    4175             : {
    4176           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4177             :         u32 enable_mask;
    4178             :         u32 error_mask;
    4179             : 
    4180             :         /* Unmask the interrupts that we always want on. */
    4181           0 :         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
    4182             :                                I915_DISPLAY_PORT_INTERRUPT |
    4183             :                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
    4184             :                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
    4185             :                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    4186             :                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
    4187             :                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
    4188             : 
    4189             :         enable_mask = ~dev_priv->irq_mask;
    4190             :         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    4191             :                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
    4192             :         enable_mask |= I915_USER_INTERRUPT;
    4193             : 
    4194           0 :         if (IS_G4X(dev))
    4195           0 :                 enable_mask |= I915_BSD_USER_INTERRUPT;
    4196             : 
    4197             :         /* Interrupt setup is already guaranteed to be single-threaded, this is
    4198             :          * just to make the assert_spin_locked check happy. */
    4199           0 :         spin_lock_irq(&dev_priv->irq_lock);
    4200           0 :         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
    4201           0 :         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
    4202           0 :         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
    4203           0 :         spin_unlock_irq(&dev_priv->irq_lock);
    4204             : 
    4205             :         /*
    4206             :          * Enable some error detection, note the instruction error mask
    4207             :          * bit is reserved, so we leave it masked.
    4208             :          */
    4209           0 :         if (IS_G4X(dev)) {
    4210             :                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
    4211             :                                GM45_ERROR_MEM_PRIV |
    4212             :                                GM45_ERROR_CP_PRIV |
    4213             :                                I915_ERROR_MEMORY_REFRESH);
    4214           0 :         } else {
    4215             :                 error_mask = ~(I915_ERROR_PAGE_TABLE |
    4216             :                                I915_ERROR_MEMORY_REFRESH);
    4217             :         }
    4218           0 :         I915_WRITE(EMR, error_mask);
    4219             : 
    4220           0 :         I915_WRITE(IMR, dev_priv->irq_mask);
    4221           0 :         I915_WRITE(IER, enable_mask);
    4222           0 :         POSTING_READ(IER);
    4223             : 
    4224           0 :         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
    4225           0 :         POSTING_READ(PORT_HOTPLUG_EN);
    4226             : 
    4227           0 :         i915_enable_asle_pipestat(dev);
    4228             : 
    4229           0 :         return 0;
    4230             : }
    4231             : 
    4232           0 : static void i915_hpd_irq_setup(struct drm_device *dev)
    4233             : {
    4234           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4235             :         u32 hotplug_en;
    4236             : 
    4237           0 :         assert_spin_locked(&dev_priv->irq_lock);
    4238             : 
    4239             :         /* Note HDMI and DP share hotplug bits */
    4240             :         /* enable bits are the same for all generations */
    4241           0 :         hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
    4242             :         /* Programming the CRT detection parameters tends
    4243             :            to generate a spurious hotplug event about three
    4244             :            seconds later.  So just do it once.
    4245             :         */
    4246           0 :         if (IS_G4X(dev))
    4247           0 :                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
    4248           0 :         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
    4249             : 
    4250             :         /* Ignore TV since it's buggy */
    4251           0 :         i915_hotplug_interrupt_update_locked(dev_priv,
    4252             :                                              HOTPLUG_INT_EN_MASK |
    4253             :                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
    4254             :                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
    4255             :                                              hotplug_en);
    4256           0 : }
    4257             : 
    4258           0 : static irqreturn_t i965_irq_handler(int irq, void *arg)
    4259             : {
    4260           0 :         struct drm_device *dev = arg;
    4261           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    4262             :         u32 iir, new_iir;
    4263           0 :         u32 pipe_stats[I915_MAX_PIPES];
    4264             :         int ret = IRQ_NONE, pipe;
    4265             :         u32 flip_mask =
    4266             :                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
    4267             :                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
    4268             : 
    4269           0 :         if (!intel_irqs_enabled(dev_priv))
    4270           0 :                 return IRQ_NONE;
    4271             : 
    4272           0 :         iir = I915_READ(IIR);
    4273             : 
    4274           0 :         for (;;) {
    4275           0 :                 bool irq_received = (iir & ~flip_mask) != 0;
    4276             :                 bool blc_event = false;
    4277             : 
    4278             :                 /* Can't rely on pipestat interrupt bit in iir as it might
    4279             :                  * have been cleared after the pipestat interrupt was received.
    4280             :                  * It doesn't set the bit in iir again, but it still produces
    4281             :                  * interrupts (for non-MSI).
    4282             :                  */
    4283           0 :                 spin_lock(&dev_priv->irq_lock);
    4284           0 :                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
    4285             :                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
    4286             : 
    4287           0 :                 for_each_pipe(dev_priv, pipe) {
    4288           0 :                         int reg = PIPESTAT(pipe);
    4289           0 :                         pipe_stats[pipe] = I915_READ(reg);
    4290             : 
    4291             :                         /*
    4292             :                          * Clear the PIPE*STAT regs before the IIR
    4293             :                          */
    4294           0 :                         if (pipe_stats[pipe] & 0x8000ffff) {
    4295           0 :                                 I915_WRITE(reg, pipe_stats[pipe]);
    4296             :                                 irq_received = true;
    4297           0 :                         }
    4298             :                 }
    4299           0 :                 spin_unlock(&dev_priv->irq_lock);
    4300             : 
    4301           0 :                 if (!irq_received)
    4302           0 :                         break;
    4303             : 
    4304             :                 ret = IRQ_HANDLED;
    4305             : 
    4306             :                 /* Consume port.  Then clear IIR or we'll miss events */
    4307           0 :                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
    4308           0 :                         i9xx_hpd_irq_handler(dev);
    4309             : 
    4310           0 :                 I915_WRITE(IIR, iir & ~flip_mask);
    4311           0 :                 new_iir = I915_READ(IIR); /* Flush posted writes */
    4312             : 
    4313           0 :                 if (iir & I915_USER_INTERRUPT)
    4314           0 :                         notify_ring(&dev_priv->ring[RCS]);
    4315           0 :                 if (iir & I915_BSD_USER_INTERRUPT)
    4316           0 :                         notify_ring(&dev_priv->ring[VCS]);
    4317             : 
    4318           0 :                 for_each_pipe(dev_priv, pipe) {
    4319           0 :                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
    4320           0 :                             i915_handle_vblank(dev, pipe, pipe, iir))
    4321           0 :                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
    4322             : 
    4323           0 :                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
    4324           0 :                                 blc_event = true;
    4325             : 
    4326           0 :                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
    4327           0 :                                 i9xx_pipe_crc_irq_handler(dev, pipe);
    4328             : 
    4329           0 :                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
    4330           0 :                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
    4331             :                 }
    4332             : 
    4333           0 :                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
    4334           0 :                         intel_opregion_asle_intr(dev);
    4335             : 
    4336           0 :                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
    4337           0 :                         gmbus_irq_handler(dev);
    4338             : 
    4339             :                 /* With MSI, interrupts are only generated when iir
    4340             :                  * transitions from zero to nonzero.  If another bit got
    4341             :                  * set while we were handling the existing iir bits, then
    4342             :                  * we would never get another interrupt.
    4343             :                  *
    4344             :                  * This is fine on non-MSI as well, as if we hit this path
    4345             :                  * we avoid exiting the interrupt handler only to generate
    4346             :                  * another one.
    4347             :                  *
    4348             :                  * Note that for MSI this could cause a stray interrupt report
    4349             :                  * if an interrupt landed in the time between writing IIR and
    4350             :                  * the posting read.  This should be rare enough to never
    4351             :                  * trigger the 99% of 100,000 interrupts test for disabling
    4352             :                  * stray interrupts.
    4353             :                  */
    4354             :                 iir = new_iir;
    4355           0 :         }
    4356             : 
    4357           0 :         return ret;
    4358           0 : }
    4359             : 
/*
 * i965_irq_uninstall - quiesce and tear down i965-class interrupt delivery.
 * @dev: DRM device being shut down.
 *
 * Mirrors the install path in reverse: first stop new interrupt sources
 * (hotplug, HWSTAM/IMR/IER masking), then acknowledge anything already
 * latched (PIPESTAT status bits, IIR).  The write order is deliberate —
 * masking before acking prevents new events from re-latching mid-teardown.
 */
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* Nothing to tear down if the driver never finished loading. */
	if (!dev_priv)
		return;

	/* Disable all hotplug detection, then ack any pending hotplug status
	 * (PORT_HOTPLUG_STAT bits are write-1-to-clear via the readback). */
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	/* Mask everything: hardware status page updates, per-pipe status
	 * reporting, and the top-level interrupt mask/enable registers. */
	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack any residual per-pipe status.  0x8000ffff selects the sticky
	 * status bits — the same ack mask the interrupt handler uses when
	 * clearing PIPE*STAT before the IIR. */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	/* Finally ack the identity register itself (write-1-to-clear). */
	I915_WRITE(IIR, I915_READ(IIR));
}
    4382             : 
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	/* Deferred-work handlers for RPS (GPU frequency) and L3 parity
	 * error events raised from the interrupt handlers. */
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

#ifdef __linux__
	/* pm_qos is a Linux-only facility; compiled out on this port. */
	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
#endif

	if (IS_GEN2(dev_priv)) {
		/* No hardware frame counter on gen2 (see comment below). */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	/*
	 * Select the per-platform interrupt vtable.  Branch order matters:
	 * IS_VALLEYVIEW() apparently also covers Cherryview (note the
	 * IS_VALLEYVIEW && !IS_CHERRYVIEW guard above), so CHV must be
	 * tested before VLV, and both before the generic gen >= 8 case.
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		/* Hotplug wiring differs by PCH even within gen8+. */
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		/* Legacy pre-PCH platforms: gen2/gen3/gen4. */
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		/* Not every legacy platform exposes hotplug detection. */
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
    4495             : 
    4496             : /**
    4497             :  * intel_irq_install - enables the hardware interrupt
    4498             :  * @dev_priv: i915 device instance
    4499             :  *
    4500             :  * This function enables the hardware interrupt handling, but leaves the hotplug
    4501             :  * handling still disabled. It is called after intel_irq_init().
    4502             :  *
    4503             :  * In the driver load and resume code we need working interrupts in a few places
    4504             :  * but don't want to deal with the hassle of concurrent probe and hotplug
    4505             :  * workers. Hence the split into this two-stage approach.
    4506             :  */
    4507           0 : int intel_irq_install(struct drm_i915_private *dev_priv)
    4508             : {
    4509             :         /*
    4510             :          * We enable some interrupt sources in our postinstall hooks, so mark
    4511             :          * interrupts as enabled _before_ actually enabling them to avoid
    4512             :          * special cases in our ordering checks.
    4513             :          */
    4514           0 :         dev_priv->pm.irqs_enabled = true;
    4515             : 
    4516           0 :         return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
    4517             : }
    4518             : 
    4519             : /**
    4520             :  * intel_irq_uninstall - finilizes all irq handling
    4521             :  * @dev_priv: i915 device instance
    4522             :  *
    4523             :  * This stops interrupt and hotplug handling and unregisters and frees all
    4524             :  * resources acquired in the init functions.
    4525             :  */
    4526           0 : void intel_irq_uninstall(struct drm_i915_private *dev_priv)
    4527             : {
    4528           0 :         drm_irq_uninstall(dev_priv->dev);
    4529           0 :         intel_hpd_cancel_work(dev_priv);
    4530           0 :         dev_priv->pm.irqs_enabled = false;
    4531           0 : }
    4532             : 
    4533             : /**
    4534             :  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
    4535             :  * @dev_priv: i915 device instance
    4536             :  *
    4537             :  * This function is used to disable interrupts at runtime, both in the runtime
    4538             :  * pm and the system suspend/resume code.
    4539             :  */
    4540           0 : void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
    4541             : {
    4542           0 :         dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
    4543           0 :         dev_priv->pm.irqs_enabled = false;
    4544             :         synchronize_irq(dev_priv->dev->irq);
    4545           0 : }
    4546             : 
    4547             : /**
    4548             :  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
    4549             :  * @dev_priv: i915 device instance
    4550             :  *
    4551             :  * This function is used to enable interrupts at runtime, both in the runtime
    4552             :  * pm and the system suspend/resume code.
    4553             :  */
    4554           0 : void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
    4555             : {
    4556           0 :         dev_priv->pm.irqs_enabled = true;
    4557           0 :         dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
    4558           0 :         dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
    4559           0 : }

Generated by: LCOV version 1.13