LCOV - code coverage report
Current view: top level - dev/pci/drm/i915 - intel_psr.c (source / functions)
Test: 6.4
Date: 2018-10-19 03:25:38
Coverage:     Hit   Total   Coverage
  Lines:        0     318      0.0 %
  Functions:    0      23      0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright © 2014 Intel Corporation
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice (including the next
      12             :  * paragraph) shall be included in all copies or substantial portions of the
      13             :  * Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      19             :  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
      20             :  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
      21             :  * DEALINGS IN THE SOFTWARE.
      22             :  */
      23             : 
      24             : /**
      25             :  * DOC: Panel Self Refresh (PSR/SRD)
      26             :  *
       27             :  * Since Haswell the display controller supports Panel Self-Refresh on
       28             :  * display panels which have a remote frame buffer (RFB) implemented
       29             :  * according to the PSR spec in eDP 1.3. PSR allows the display to go to
       30             :  * lower standby states when the system is idle but the display is on, as
       31             :  * it eliminates display refresh requests to DDR memory completely as long
       32             :  * as the frame buffer for that display is unchanged.
      33             :  *
      34             :  * Panel Self Refresh must be supported by both Hardware (source) and
      35             :  * Panel (sink).
      36             :  *
      37             :  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
      38             :  * to power down the link and memory controller. For DSI panels the same idea
      39             :  * is called "manual mode".
      40             :  *
      41             :  * The implementation uses the hardware-based PSR support which automatically
      42             :  * enters/exits self-refresh mode. The hardware takes care of sending the
      43             :  * required DP aux message and could even retrain the link (that part isn't
      44             :  * enabled yet though). The hardware also keeps track of any frontbuffer
      45             :  * changes to know when to exit self-refresh mode again. Unfortunately that
       46             :  * part doesn't work too well, which is why the i915 PSR support uses the
      47             :  * software frontbuffer tracking to make sure it doesn't miss a screen
      48             :  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
      49             :  * get called by the frontbuffer tracking code. Note that because of locking
      50             :  * issues the self-refresh re-enable code is done from a work queue, which
       51             :  * must be correctly synchronized/cancelled when shutting down the pipe.
      52             :  */
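
/*
 * Illustrative sketch only, not code from this driver: roughly how the
 * frontbuffer tracking described above is expected to drive these hooks.
 * Only intel_psr_invalidate(), intel_psr_flush() and ORIGIN_FLIP appear in
 * this file; the ORIGIN_CPU value and the way frontbuffer_bits is obtained
 * are assumptions.
 *
 *      unsigned frontbuffer_bits = ...;   // planes touched by the rendering
 *
 *      // Rendering starts: drop out of self-refresh so updates reach the screen.
 *      intel_psr_invalidate(dev, frontbuffer_bits);
 *
 *      // ... CPU renders into the frontbuffer ...
 *
 *      // Rendering flushed to memory: PSR may be re-enabled later from the
 *      // delayed work item once no relevant frontbuffer bits remain busy.
 *      intel_psr_flush(dev, frontbuffer_bits, ORIGIN_CPU);
 */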
      53             : 
      54             : #include <dev/pci/drm/drmP.h>
      55             : 
      56             : #include "intel_drv.h"
      57             : #include "i915_drv.h"
      58             : 
      59           0 : static bool is_edp_psr(struct intel_dp *intel_dp)
      60             : {
      61           0 :         return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
      62             : }
      63             : 
      64           0 : static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
      65             : {
      66           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
      67             :         uint32_t val;
      68             : 
      69           0 :         val = I915_READ(VLV_PSRSTAT(pipe)) &
      70             :               VLV_EDP_PSR_CURR_STATE_MASK;
      71           0 :         return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
      72           0 :                (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
      73             : }
      74             : 
      75           0 : static void intel_psr_write_vsc(struct intel_dp *intel_dp,
      76             :                                 const struct edp_vsc_psr *vsc_psr)
      77             : {
      78           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
      79           0 :         struct drm_device *dev = dig_port->base.base.dev;
      80           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
      81           0 :         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
      82           0 :         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
      83           0 :         u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
      84           0 :         uint32_t *data = (uint32_t *) vsc_psr;
      85             :         unsigned int i;
      86             : 
       87             :         /* As per BSpec (Pipe Video Data Island Packet), we need to disable
       88             :            the video DIP being updated before programming the video DIP data
       89             :            buffer registers for the DIP being updated. */
      90           0 :         I915_WRITE(ctl_reg, 0);
      91           0 :         POSTING_READ(ctl_reg);
      92             : 
      93           0 :         for (i = 0; i < sizeof(*vsc_psr); i += 4) {
      94           0 :                 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
      95             :                                                    i >> 2), *data);
      96           0 :                 data++;
      97             :         }
      98           0 :         for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
      99           0 :                 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
     100             :                                                    i >> 2), 0);
     101             : 
     102           0 :         I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
     103           0 :         POSTING_READ(ctl_reg);
     104           0 : }
     105             : 
     106           0 : static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
     107             : {
     108           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     109           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     110           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     111           0 :         struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
     112           0 :         enum pipe pipe = to_intel_crtc(crtc)->pipe;
     113             :         uint32_t val;
     114             : 
      115             :         /* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
     116           0 :         val  = I915_READ(VLV_VSCSDP(pipe));
     117           0 :         val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
     118           0 :         val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
     119           0 :         I915_WRITE(VLV_VSCSDP(pipe), val);
     120           0 : }
     121             : 
     122           0 : static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
     123             : {
     124           0 :         struct edp_vsc_psr psr_vsc;
     125             : 
     126             :         /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
     127           0 :         memset(&psr_vsc, 0, sizeof(psr_vsc));
     128           0 :         psr_vsc.sdp_header.HB0 = 0;
     129           0 :         psr_vsc.sdp_header.HB1 = 0x7;
     130           0 :         psr_vsc.sdp_header.HB2 = 0x3;
     131           0 :         psr_vsc.sdp_header.HB3 = 0xb;
     132           0 :         intel_psr_write_vsc(intel_dp, &psr_vsc);
     133           0 : }
     134             : 
     135           0 : static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
     136             : {
     137           0 :         struct edp_vsc_psr psr_vsc;
     138             : 
     139             :         /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
     140           0 :         memset(&psr_vsc, 0, sizeof(psr_vsc));
     141           0 :         psr_vsc.sdp_header.HB0 = 0;
     142           0 :         psr_vsc.sdp_header.HB1 = 0x7;
     143           0 :         psr_vsc.sdp_header.HB2 = 0x2;
     144           0 :         psr_vsc.sdp_header.HB3 = 0x8;
     145           0 :         intel_psr_write_vsc(intel_dp, &psr_vsc);
     146           0 : }
     147             : 
     148           0 : static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
     149             : {
     150           0 :         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
     151             :                            DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
     152           0 : }
     153             : 
     154           0 : static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
     155             : {
     156           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
     157           0 :         struct drm_device *dev = dig_port->base.base.dev;
     158           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     159             :         uint32_t aux_clock_divider;
     160             :         uint32_t aux_data_reg, aux_ctl_reg;
     161             :         int precharge = 0x3;
     162             :         static const uint8_t aux_msg[] = {
     163             :                 [0] = DP_AUX_NATIVE_WRITE << 4,
     164             :                 [1] = DP_SET_POWER >> 8,
     165             :                 [2] = DP_SET_POWER & 0xff,
     166             :                 [3] = 1 - 1,
     167             :                 [4] = DP_SET_POWER_D0,
     168             :         };
     169             :         int i;
     170             : 
     171             :         BUILD_BUG_ON(sizeof(aux_msg) > 20);
     172             : 
     173           0 :         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
     174             : 
     175           0 :         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
     176             :                            DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
     177             : 
     178             :         /* Enable AUX frame sync at sink */
     179           0 :         if (dev_priv->psr.aux_frame_sync)
     180           0 :                 drm_dp_dpcd_writeb(&intel_dp->aux,
     181             :                                 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
     182             :                                 DP_AUX_FRAME_SYNC_ENABLE);
     183             : 
     184           0 :         aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
     185           0 :                                 DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
     186           0 :         aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
     187           0 :                                 DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
     188             : 
     189             :         /* Setup AUX registers */
     190           0 :         for (i = 0; i < sizeof(aux_msg); i += 4)
     191           0 :                 I915_WRITE(aux_data_reg + i,
     192             :                            intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
     193             : 
     194           0 :         if (INTEL_INFO(dev)->gen >= 9) {
     195             :                 uint32_t val;
     196             : 
     197           0 :                 val = I915_READ(aux_ctl_reg);
     198           0 :                 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
     199           0 :                 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
     200           0 :                 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
     201           0 :                 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
     202             :                 /* Use hardcoded data values for PSR, frame sync and GTC */
     203           0 :                 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
     204           0 :                 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
     205           0 :                 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
     206           0 :                 I915_WRITE(aux_ctl_reg, val);
     207           0 :         } else {
     208           0 :                 I915_WRITE(aux_ctl_reg,
     209             :                    DP_AUX_CH_CTL_TIME_OUT_400us |
     210             :                    (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
     211             :                    (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
     212             :                    (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
     213             :         }
     214             : 
     215           0 :         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
     216           0 : }
     217             : 
     218           0 : static void vlv_psr_enable_source(struct intel_dp *intel_dp)
     219             : {
     220           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
     221           0 :         struct drm_device *dev = dig_port->base.base.dev;
     222           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     223           0 :         struct drm_crtc *crtc = dig_port->base.base.crtc;
     224           0 :         enum pipe pipe = to_intel_crtc(crtc)->pipe;
     225             : 
     226             :         /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
     227           0 :         I915_WRITE(VLV_PSRCTL(pipe),
     228             :                    VLV_EDP_PSR_MODE_SW_TIMER |
     229             :                    VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
     230             :                    VLV_EDP_PSR_ENABLE);
     231           0 : }
     232             : 
     233           0 : static void vlv_psr_activate(struct intel_dp *intel_dp)
     234             : {
     235           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
     236           0 :         struct drm_device *dev = dig_port->base.base.dev;
     237           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     238           0 :         struct drm_crtc *crtc = dig_port->base.base.crtc;
     239           0 :         enum pipe pipe = to_intel_crtc(crtc)->pipe;
     240             : 
      241             :         /* Let's do the transition from PSR_state 1 to PSR_state 2,
      242             :          * that is, PSR transition to active - static frame transmission.
      243             :          * Then the hardware is responsible for the transition to PSR_state 3,
      244             :          * that is, PSR active - no Remote Frame Buffer (RFB) update.
      245             :          */
     246           0 :         I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
     247             :                    VLV_EDP_PSR_ACTIVE_ENTRY);
     248           0 : }
     249             : 
     250           0 : static void hsw_psr_enable_source(struct intel_dp *intel_dp)
     251             : {
     252           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
     253           0 :         struct drm_device *dev = dig_port->base.base.dev;
     254           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     255             : 
     256             :         uint32_t max_sleep_time = 0x1f;
      257             :         /* It was recently identified that, depending on the panel, the idle
      258             :          * frame count calculated by the hardware can be off by 1. So let's
      259             :          * use the value that came from the VBT + 1.
      260             :          * There are also cases where the panel demands at least 4 idle frames
      261             :          * but the VBT is not set. To cover both cases, let's use at least 5
      262             :          * idle frames when the VBT isn't set, to be on the safe side.
     263             :          */
     264           0 :         uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
     265           0 :                                dev_priv->vbt.psr.idle_frames + 1 : 5;
     266             :         uint32_t val = 0x0;
     267             :         const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
     268             : 
     269           0 :         if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
      270             :                 /* It doesn't mean we shouldn't send TPS patterns, so let's
      271             :                    send the minimal TP1 possible and skip TP2. */
     272             :                 val |= EDP_PSR_TP1_TIME_100us;
     273             :                 val |= EDP_PSR_TP2_TP3_TIME_0us;
     274             :                 val |= EDP_PSR_SKIP_AUX_EXIT;
     275             :                 /* Sink should be able to train with the 5 or 6 idle patterns */
     276           0 :                 idle_frames += 4;
     277           0 :         }
     278             : 
     279           0 :         I915_WRITE(EDP_PSR_CTL(dev), val |
     280             :                    (IS_BROADWELL(dev) ? 0 : link_entry_time) |
     281             :                    max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
     282             :                    idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
     283             :                    EDP_PSR_ENABLE);
     284             : 
     285           0 :         if (dev_priv->psr.psr2_support)
     286           0 :                 I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
     287             :                                 EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
     288           0 : }
     289             : 
     290           0 : static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
     291             : {
     292           0 :         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
     293           0 :         struct drm_device *dev = dig_port->base.base.dev;
     294           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     295           0 :         struct drm_crtc *crtc = dig_port->base.base.crtc;
     296           0 :         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
     297             : 
     298           0 :         lockdep_assert_held(&dev_priv->psr.lock);
     299           0 :         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
     300           0 :         WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
     301             : 
     302           0 :         dev_priv->psr.source_ok = false;
     303             : 
     304           0 :         if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
     305             :                 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
     306           0 :                 return false;
     307             :         }
     308             : 
     309           0 :         if (!i915.enable_psr) {
     310             :                 DRM_DEBUG_KMS("PSR disable by flag\n");
     311           0 :                 return false;
     312             :         }
     313             : 
     314           0 :         if (IS_HASWELL(dev) &&
     315           0 :             I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
     316             :                       S3D_ENABLE) {
     317             :                 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
     318           0 :                 return false;
     319             :         }
     320             : 
     321           0 :         if (IS_HASWELL(dev) &&
     322           0 :             intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
     323             :                 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
     324           0 :                 return false;
     325             :         }
     326             : 
     327           0 :         if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
     328           0 :                                     (dig_port->port != PORT_A))) {
     329             :                 DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
     330           0 :                 return false;
     331             :         }
     332             : 
     333           0 :         dev_priv->psr.source_ok = true;
     334           0 :         return true;
     335           0 : }
     336             : 
     337           0 : static void intel_psr_activate(struct intel_dp *intel_dp)
     338             : {
     339           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     340           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     341           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     342             : 
     343           0 :         WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
     344           0 :         WARN_ON(dev_priv->psr.active);
     345             :         lockdep_assert_held(&dev_priv->psr.lock);
     346             : 
     347             :         /* Enable/Re-enable PSR on the host */
     348           0 :         if (HAS_DDI(dev))
      349             :                 /* On HSW+, once we enable PSR on the source it activates
      350             :                  * itself as soon as the configured idle_frame count is
      351             :                  * reached. So we just enable it here at activation time.
      352             :                  */
     353           0 :                 hsw_psr_enable_source(intel_dp);
     354             :         else
     355           0 :                 vlv_psr_activate(intel_dp);
     356             : 
     357           0 :         dev_priv->psr.active = true;
     358           0 : }
     359             : 
     360             : /**
     361             :  * intel_psr_enable - Enable PSR
     362             :  * @intel_dp: Intel DP
     363             :  *
     364             :  * This function can only be called after the pipe is fully trained and enabled.
     365             :  */
     366           0 : void intel_psr_enable(struct intel_dp *intel_dp)
     367             : {
     368           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     369           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     370           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     371           0 :         struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
     372             : 
     373           0 :         if (!HAS_PSR(dev)) {
     374             :                 DRM_DEBUG_KMS("PSR not supported on this platform\n");
     375           0 :                 return;
     376             :         }
     377             : 
     378           0 :         if (!is_edp_psr(intel_dp)) {
     379             :                 DRM_DEBUG_KMS("PSR not supported by this panel\n");
     380           0 :                 return;
     381             :         }
     382             : 
     383           0 :         mutex_lock(&dev_priv->psr.lock);
     384           0 :         if (dev_priv->psr.enabled) {
     385             :                 DRM_DEBUG_KMS("PSR already in use\n");
     386             :                 goto unlock;
     387             :         }
     388             : 
     389           0 :         if (!intel_psr_match_conditions(intel_dp))
     390             :                 goto unlock;
     391             : 
     392           0 :         dev_priv->psr.busy_frontbuffer_bits = 0;
     393             : 
     394           0 :         if (HAS_DDI(dev)) {
     395           0 :                 hsw_psr_setup_vsc(intel_dp);
     396             : 
     397           0 :                 if (dev_priv->psr.psr2_support) {
      398             :                         /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
     399           0 :                         if (crtc->config->pipe_src_w > 3200 ||
     400           0 :                                 crtc->config->pipe_src_h > 2000)
     401           0 :                                 dev_priv->psr.psr2_support = false;
     402             :                         else
     403           0 :                                 skl_psr_setup_su_vsc(intel_dp);
     404             :                 }
     405             : 
     406             :                 /* Avoid continuous PSR exit by masking memup and hpd */
     407           0 :                 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
     408             :                            EDP_PSR_DEBUG_MASK_HPD);
     409             : 
     410             :                 /* Enable PSR on the panel */
     411           0 :                 hsw_psr_enable_sink(intel_dp);
     412             : 
     413           0 :                 if (INTEL_INFO(dev)->gen >= 9)
     414           0 :                         intel_psr_activate(intel_dp);
     415             :         } else {
     416           0 :                 vlv_psr_setup_vsc(intel_dp);
     417             : 
     418             :                 /* Enable PSR on the panel */
     419           0 :                 vlv_psr_enable_sink(intel_dp);
     420             : 
      421             :                 /* On HSW+, enable_source also means entering the PSR
      422             :                  * entry/active state as soon as the idle_frame count is
      423             :                  * reached, which would be too soon here. On VLV, however,
      424             :                  * enable_source just enables PSR and leaves it in the inactive
      425             :                  * state, so we can do this prior to the active transition, i.e. here.
      426             :                  */
     427           0 :                 vlv_psr_enable_source(intel_dp);
     428             :         }
     429             : 
     430           0 :         dev_priv->psr.enabled = intel_dp;
     431             : unlock:
     432           0 :         mutex_unlock(&dev_priv->psr.lock);
     433           0 : }
     434             : 
     435           0 : static void vlv_psr_disable(struct intel_dp *intel_dp)
     436             : {
     437           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     438           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     439           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     440             :         struct intel_crtc *intel_crtc =
     441           0 :                 to_intel_crtc(intel_dig_port->base.base.crtc);
     442             :         uint32_t val;
     443             : 
     444           0 :         if (dev_priv->psr.active) {
     445             :                 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
     446           0 :                 if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
     447             :                               VLV_EDP_PSR_IN_TRANS) == 0, 1))
     448           0 :                         WARN(1, "PSR transition took longer than expected\n");
     449             : 
     450           0 :                 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
     451           0 :                 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
     452           0 :                 val &= ~VLV_EDP_PSR_ENABLE;
     453           0 :                 val &= ~VLV_EDP_PSR_MODE_MASK;
     454           0 :                 I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
     455             : 
     456           0 :                 dev_priv->psr.active = false;
     457           0 :         } else {
     458           0 :                 WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
     459             :         }
     460           0 : }
     461             : 
     462           0 : static void hsw_psr_disable(struct intel_dp *intel_dp)
     463             : {
     464           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     465           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     466           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     467             : 
     468           0 :         if (dev_priv->psr.active) {
     469           0 :                 I915_WRITE(EDP_PSR_CTL(dev),
     470             :                            I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
     471             : 
     472             :                 /* Wait till PSR is idle */
     473           0 :                 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
     474             :                                EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
     475           0 :                         DRM_ERROR("Timed out waiting for PSR Idle State\n");
     476             : 
     477           0 :                 dev_priv->psr.active = false;
     478           0 :         } else {
     479           0 :                 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
     480             :         }
     481           0 : }
     482             : 
     483             : /**
     484             :  * intel_psr_disable - Disable PSR
     485             :  * @intel_dp: Intel DP
     486             :  *
     487             :  * This function needs to be called before disabling pipe.
     488             :  */
     489           0 : void intel_psr_disable(struct intel_dp *intel_dp)
     490             : {
     491           0 :         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     492           0 :         struct drm_device *dev = intel_dig_port->base.base.dev;
     493           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     494             : 
     495           0 :         mutex_lock(&dev_priv->psr.lock);
     496           0 :         if (!dev_priv->psr.enabled) {
     497           0 :                 mutex_unlock(&dev_priv->psr.lock);
     498           0 :                 return;
     499             :         }
     500             : 
     501           0 :         if (HAS_DDI(dev))
     502           0 :                 hsw_psr_disable(intel_dp);
     503             :         else
     504           0 :                 vlv_psr_disable(intel_dp);
     505             : 
     506           0 :         dev_priv->psr.enabled = NULL;
     507           0 :         mutex_unlock(&dev_priv->psr.lock);
     508             : 
     509           0 :         cancel_delayed_work_sync(&dev_priv->psr.work);
     510           0 : }
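
/*
 * Illustrative sketch only, not code from this driver: where intel_psr_enable()
 * and intel_psr_disable() are expected to sit in an encoder's modeset path,
 * per the kernel-doc above ("after the pipe is fully trained and enabled",
 * "before disabling pipe"). The foo_edp_* helpers are hypothetical and
 * enc_to_intel_dp() is assumed to be available from intel_drv.h.
 *
 *      static void foo_edp_enable(struct intel_encoder *encoder)
 *      {
 *              // ... enable the pipe and train the eDP link ...
 *              intel_psr_enable(enc_to_intel_dp(&encoder->base));
 *      }
 *
 *      static void foo_edp_disable(struct intel_encoder *encoder)
 *      {
 *              intel_psr_disable(enc_to_intel_dp(&encoder->base));
 *              // ... shut down the link and disable the pipe ...
 *      }
 */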
     511             : 
     512           0 : static void intel_psr_work(struct work_struct *work)
     513             : {
     514             :         struct drm_i915_private *dev_priv =
     515           0 :                 container_of(work, typeof(*dev_priv), psr.work.work);
     516           0 :         struct intel_dp *intel_dp = dev_priv->psr.enabled;
     517           0 :         struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
     518           0 :         enum pipe pipe = to_intel_crtc(crtc)->pipe;
     519             : 
      520             :         /* We have to make sure PSR is ready for re-enable,
      521             :          * otherwise it stays disabled until the next full enable/disable cycle.
     522             :          * PSR might take some time to get fully disabled
     523             :          * and be ready for re-enable.
     524             :          */
     525           0 :         if (HAS_DDI(dev_priv->dev)) {
     526           0 :                 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
     527             :                               EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
     528           0 :                         DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
     529           0 :                         return;
     530             :                 }
     531             :         } else {
     532           0 :                 if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
     533             :                               VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
     534           0 :                         DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
     535           0 :                         return;
     536             :                 }
     537             :         }
     538           0 :         mutex_lock(&dev_priv->psr.lock);
     539           0 :         intel_dp = dev_priv->psr.enabled;
     540             : 
     541           0 :         if (!intel_dp)
     542             :                 goto unlock;
     543             : 
     544             :         /*
     545             :          * The delayed work can race with an invalidate hence we need to
     546             :          * recheck. Since psr_flush first clears this and then reschedules we
     547             :          * won't ever miss a flush when bailing out here.
     548             :          */
     549           0 :         if (dev_priv->psr.busy_frontbuffer_bits)
     550             :                 goto unlock;
     551             : 
     552           0 :         intel_psr_activate(intel_dp);
     553             : unlock:
     554           0 :         mutex_unlock(&dev_priv->psr.lock);
     555           0 : }
     556             : 
     557           0 : static void intel_psr_exit(struct drm_device *dev)
     558             : {
     559           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     560           0 :         struct intel_dp *intel_dp = dev_priv->psr.enabled;
     561           0 :         struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
     562           0 :         enum pipe pipe = to_intel_crtc(crtc)->pipe;
     563             :         u32 val;
     564             : 
     565           0 :         if (!dev_priv->psr.active)
     566           0 :                 return;
     567             : 
     568           0 :         if (HAS_DDI(dev)) {
     569           0 :                 val = I915_READ(EDP_PSR_CTL(dev));
     570             : 
     571           0 :                 WARN_ON(!(val & EDP_PSR_ENABLE));
     572             : 
     573           0 :                 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
     574           0 :         } else {
     575           0 :                 val = I915_READ(VLV_PSRCTL(pipe));
     576             : 
      577             :                 /* Here we transition directly from PSR_state 3 to PSR_state 5,
      578             :                  * since PSR_state 4, that is active with single frame update,
      579             :                  * can be skipped. After PSR_state 5, that is PSR exit, the
      580             :                  * hardware is responsible for transitioning back to PSR_state 1,
      581             :                  * that is PSR inactive. This is the same state as after
      582             :                  * vlv_psr_enable_source.
      583             :                  */
     584           0 :                 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
     585           0 :                 I915_WRITE(VLV_PSRCTL(pipe), val);
     586             : 
     587             :                 /* Send AUX wake up - Spec says after transitioning to PSR
     588             :                  * active we have to send AUX wake up by writing 01h in DPCD
     589             :                  * 600h of sink device.
     590             :                  * XXX: This might slow down the transition, but without this
     591             :                  * HW doesn't complete the transition to PSR_state 1 and we
     592             :                  * never get the screen updated.
     593             :                  */
     594           0 :                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
     595             :                                    DP_SET_POWER_D0);
     596             :         }
     597             : 
     598           0 :         dev_priv->psr.active = false;
     599           0 : }
     600             : 
     601             : /**
     602             :  * intel_psr_single_frame_update - Single Frame Update
     603             :  * @dev: DRM device
     604             :  * @frontbuffer_bits: frontbuffer plane tracking bits
     605             :  *
     606             :  * Some platforms support a single frame update feature that is used to
      607             :  * send and update only one frame on the Remote Frame Buffer.
      608             :  * So far it is only implemented for Valleyview and Cherryview because
      609             :  * the hardware requires this to be done before a page flip.
     610             :  */
     611           0 : void intel_psr_single_frame_update(struct drm_device *dev,
     612             :                                    unsigned frontbuffer_bits)
     613             : {
     614           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     615             :         struct drm_crtc *crtc;
     616             :         enum pipe pipe;
     617             :         u32 val;
     618             : 
     619             :         /*
     620             :          * Single frame update is already supported on BDW+ but it requires
     621             :          * many W/A and it isn't really needed.
     622             :          */
     623           0 :         if (!IS_VALLEYVIEW(dev))
     624           0 :                 return;
     625             : 
     626           0 :         mutex_lock(&dev_priv->psr.lock);
     627           0 :         if (!dev_priv->psr.enabled) {
     628           0 :                 mutex_unlock(&dev_priv->psr.lock);
     629           0 :                 return;
     630             :         }
     631             : 
     632           0 :         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
     633           0 :         pipe = to_intel_crtc(crtc)->pipe;
     634             : 
     635           0 :         if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
     636           0 :                 val = I915_READ(VLV_PSRCTL(pipe));
     637             : 
     638             :                 /*
      639             :                  * We need to set this bit before writing the registers for a flip.
      640             :                  * This bit self-clears when the hardware reaches the PSR active state.
     641             :                  */
     642           0 :                 I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
     643           0 :         }
     644           0 :         mutex_unlock(&dev_priv->psr.lock);
     645           0 : }
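
/*
 * Illustrative sketch only, not code from this driver: per the kernel-doc
 * above, on VLV/CHV the single frame update must be armed before the flip is
 * written to the hardware. The surrounding flip-path steps are assumptions;
 * only intel_psr_single_frame_update(), intel_psr_flush() and ORIGIN_FLIP
 * come from this file.
 *
 *      intel_psr_single_frame_update(dev, frontbuffer_bits);
 *      // ... write the new surface address / queue the page flip ...
 *      intel_psr_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 */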
     646             : 
     647             : /**
      648             :  * intel_psr_invalidate - Invalidate PSR
     649             :  * @dev: DRM device
     650             :  * @frontbuffer_bits: frontbuffer plane tracking bits
     651             :  *
     652             :  * Since the hardware frontbuffer tracking has gaps we need to integrate
     653             :  * with the software frontbuffer tracking. This function gets called every
     654             :  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
     655             :  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
     656             :  *
      657             :  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
     658             :  */
     659           0 : void intel_psr_invalidate(struct drm_device *dev,
     660             :                           unsigned frontbuffer_bits)
     661             : {
     662           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     663             :         struct drm_crtc *crtc;
     664             :         enum pipe pipe;
     665             : 
     666           0 :         mutex_lock(&dev_priv->psr.lock);
     667           0 :         if (!dev_priv->psr.enabled) {
     668           0 :                 mutex_unlock(&dev_priv->psr.lock);
     669           0 :                 return;
     670             :         }
     671             : 
     672           0 :         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
     673           0 :         pipe = to_intel_crtc(crtc)->pipe;
     674             : 
     675           0 :         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
     676           0 :         dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
     677             : 
     678           0 :         if (frontbuffer_bits)
     679           0 :                 intel_psr_exit(dev);
     680             : 
     681           0 :         mutex_unlock(&dev_priv->psr.lock);
     682           0 : }
     683             : 
     684             : /**
     685             :  * intel_psr_flush - Flush PSR
     686             :  * @dev: DRM device
     687             :  * @frontbuffer_bits: frontbuffer plane tracking bits
     688             :  * @origin: which operation caused the flush
     689             :  *
     690             :  * Since the hardware frontbuffer tracking has gaps we need to integrate
     691             :  * with the software frontbuffer tracking. This function gets called every
     692             :  * time frontbuffer rendering has completed and flushed out to memory. PSR
     693             :  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
     694             :  *
     695             :  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
     696             :  */
     697           0 : void intel_psr_flush(struct drm_device *dev,
     698             :                      unsigned frontbuffer_bits, enum fb_op_origin origin)
     699             : {
     700           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     701             :         struct drm_crtc *crtc;
     702             :         enum pipe pipe;
     703           0 :         int delay_ms = HAS_DDI(dev) ? 100 : 500;
     704             : 
     705           0 :         mutex_lock(&dev_priv->psr.lock);
     706           0 :         if (!dev_priv->psr.enabled) {
     707           0 :                 mutex_unlock(&dev_priv->psr.lock);
     708           0 :                 return;
     709             :         }
     710             : 
     711           0 :         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
     712           0 :         pipe = to_intel_crtc(crtc)->pipe;
     713             : 
     714           0 :         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
     715           0 :         dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
     716             : 
     717           0 :         if (HAS_DDI(dev)) {
     718             :                 /*
     719             :                  * By definition every flush should mean invalidate + flush,
     720             :                  * however on core platforms let's minimize the
      721             :                  * disable/re-enable so we can avoid the invalidate when a flip
     722             :                  * originated the flush.
     723             :                  */
     724           0 :                 if (frontbuffer_bits && origin != ORIGIN_FLIP)
     725           0 :                         intel_psr_exit(dev);
     726             :         } else {
     727             :                 /*
      728             :                  * On Valleyview and Cherryview we don't use hardware tracking,
      729             :                  * so plane updates or cursor moves don't result in a PSR
      730             :                  * invalidate, which means we need to fake it manually in
      731             :                  * software for all flushes.
     732             :                  */
     733           0 :                 if (frontbuffer_bits)
     734           0 :                         intel_psr_exit(dev);
     735             :         }
     736             : 
     737           0 :         if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
     738           0 :                 schedule_delayed_work(&dev_priv->psr.work,
     739           0 :                                       msecs_to_jiffies(delay_ms));
     740           0 :         mutex_unlock(&dev_priv->psr.lock);
     741           0 : }
     742             : 
     743             : /**
     744             :  * intel_psr_init - Init basic PSR work and mutex.
     745             :  * @dev: DRM device
     746             :  *
      747             :  * This function is called only once at driver load to initialize basic
     748             :  * PSR stuff.
     749             :  */
     750           0 : void intel_psr_init(struct drm_device *dev)
     751             : {
     752           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
     753             : 
     754           0 :         INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
     755           0 :         rw_init(&dev_priv->psr.lock, "psrlk");
     756           0 : }

Generated by: LCOV version 1.13