LCOV - code coverage report
Current view: top level - dev/pci/drm/i915 - intel_uncore.c (source / functions) Hit Total Coverage
Test: 6.4 Lines: 0 588 0.0 %
Date: 2018-10-19 03:25:38 Functions: 0 117 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*
       2             :  * Copyright © 2013 Intel Corporation
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice (including the next
      12             :  * paragraph) shall be included in all copies or substantial portions of the
      13             :  * Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      19             :  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
      20             :  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
      21             :  * IN THE SOFTWARE.
      22             :  */
      23             : 
      24             : #include "i915_drv.h"
      25             : #include "intel_drv.h"
      26             : #include "i915_vgpu.h"
      27             : 
      28             : #ifdef __linux__
      29             : #include <linux/pm_runtime.h>
      30             : #endif
      31             : 
      32             : #define FORCEWAKE_ACK_TIMEOUT_MS 50
      33             : 
      34             : #define __raw_i915_read8(dev_priv__, reg__) bus_space_read_1((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__))
      35             : #define __raw_i915_write8(dev_priv__, reg__, val__) bus_space_write_1((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__), (val__))
      36             : 
      37             : #define __raw_i915_read16(dev_priv__, reg__) bus_space_read_2((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__))
      38             : #define __raw_i915_write16(dev_priv__, reg__, val__) bus_space_write_2((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__), (val__))
      39             : 
      40             : #define __raw_i915_read32(dev_priv__, reg__) bus_space_read_4((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__))
      41             : #define __raw_i915_write32(dev_priv__, reg__, val__) bus_space_write_4((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__), (val__))
      42             : 
      43             : #define __raw_i915_read64(dev_priv__, reg__) bus_space_read_8((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__))
      44             : #define __raw_i915_write64(dev_priv__, reg__, val__) bus_space_write_8((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__), (val__))
      45             : 
      46             : #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
      47             : 
      48             : static const char * const forcewake_domain_names[] = {
      49             :         "render",
      50             :         "blitter",
      51             :         "media",
      52             : };
      53             : 
      54             : const char *
      55           0 : intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
      56             : {
      57             :         BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
      58             : 
      59           0 :         if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
      60           0 :                 return forcewake_domain_names[id];
      61             : 
      62           0 :         WARN_ON(id);
      63             : 
      64           0 :         return "unknown";
      65           0 : }
      66             : 
/*
 * Warn (once) if register access is attempted while the device is
 * runtime-suspended; MMIO to a suspended device is invalid.
 */
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}
      73             : 
/* Write the domain's reset value to its forcewake set register. */
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
      80             : 
/*
 * Arm the domain's release timer to fire on the next tick; the timer
 * callback performs the actual hardware forcewake put.
 */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}
      86             : 
/*
 * Spin until the hardware clears FORCEWAKE_KERNEL in the domain's ack
 * register, or FORCEWAKE_ACK_TIMEOUT_MS elapses (logged as an error).
 */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
      96             : 
/* Request forcewake by writing the domain's set value to its set register. */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}
     102             : 
/*
 * Spin until the hardware sets FORCEWAKE_KERNEL in the domain's ack
 * register, or FORCEWAKE_ACK_TIMEOUT_MS elapses (logged as an error).
 */
static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
     112             : 
/* Release forcewake by writing the domain's clear value to its set register. */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
     118             : 
/*
 * Flush the preceding write with a posting read, if the domain has a
 * register designated for that purpose.
 */
static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}
     126             : 
/*
 * Acquire forcewake for every domain in @fw_domains.  For each domain:
 * wait for any previous ack to clear, request the wake, then wait for
 * the hardware to acknowledge it.
 */
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}
     139             : 
/*
 * Release forcewake for every domain in @fw_domains, flushing each
 * release write with a posting read.
 */
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}
     151             : 
/*
 * Issue a single posting read to flush prior forcewake writes; one
 * domain suffices, so the loop breaks after the first iteration.
 */
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}
     164             : 
/*
 * Write the reset value to every domain in @fw_domains, then flush with
 * a posting read.  No-op before any domains have been initialized.
 */
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	if (dev_priv->uncore.fw_domains == 0)
		return;

	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}
     179             : 
/*
 * Wait (up to 500us) for the GT thread to report awake; works around a
 * sporadic read returning 0 right after forcewake (see comment below).
 */
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
     189             : 
/*
 * Acquire forcewake domains and then wait for the GT thread to wake,
 * as required on snb/ivb/hsw/bdw/vlv (see workaround tag below).
 */
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
     198             : 
/* Check GTFIFODBG for GT FIFO errors; warn and write back to clear any set. */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
     207             : 
/* Release forcewake domains, then check/clear any GT FIFO errors. */
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}
     214             : 
     215           0 : static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
     216             : {
     217           0 :         u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
     218             : 
     219           0 :         return count & GT_FIFO_FREE_ENTRIES_MASK;
     220             : }
     221             : 
/*
 * Ensure a free GT FIFO entry exists before a shadowed register write,
 * then consume one entry from the cached count.  Returns the number of
 * timeouts encountered (0 on success).
 */
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		/* Poll up to 500 * 10us for the FIFO to drain above the
		 * reserved watermark. */
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	/* Account for the entry the caller is about to consume. */
	dev_priv->uncore.fifo_count--;

	return ret;
}
     247             : 
/*
 * Timer callback that drops the deferred reference armed by
 * __intel_uncore_forcewake_put().  When the last reference goes away
 * the hardware forcewake for the domain is released.
 */
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	/* Guard against an unbalanced put: bump the count back up so the
	 * decrement below cannot underflow. */
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
     265             : 
/*
 * Forcibly release all forcewake domains and reset their registers,
 * optionally (@restore) re-acquiring any references that were held by
 * users so their forcewake state survives the reset.
 *
 * All pending release timers are flushed (retrying up to 100 times)
 * before uncore.lock is held across the reset, so no timer can race
 * with it.  On success the lock is released at the end.
 */
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			/* Timer was pending: run its release work now. */
			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* A timer was re-armed behind our back; drop the lock,
		 * yield and retry. */
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	/* Collect domains that still hold user references ... */
	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	/* ... and drop them in hardware before the register reset. */
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
     333             : 
/*
 * Detect embedded DRAM (eLLC) on HSW/BDW/gen9+ and record its size in
 * dev_priv->ellc_size.  The size is hard-coded to 128MB whenever
 * EDRAM_ENABLED is set -- see the comment below on why.
 */
static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}
     350             : 
/*
 * Clear stale uncore error state left by the BIOS or a previous kernel
 * (unclaimed-register flag, GT FIFO errors), apply the CHV shadow
 * register workaround, then reset forcewake.
 */
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Clear any pending unclaimed-register detection. */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
     374             : 
/*
 * Early uncore sanitize: clear stale debug/FIFO state and reset
 * forcewake, then clear any pending GTT faults.
 */
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}
     380             : 
/* Put GT power management into a known state before hardware init. */
void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
     386             : 
/*
 * Take a reference on each requested (and supported) forcewake domain.
 * Must be called with uncore.lock held.  Hardware forcewake is only
 * requested for domains whose refcount transitions 0 -> 1.
 */
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	/* Ignore domains this platform does not have. */
	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		/* Already awake: just bump the refcount, no hw access. */
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
     406             : 
     407             : /**
     408             :  * intel_uncore_forcewake_get - grab forcewake domain references
     409             :  * @dev_priv: i915 device instance
     410             :  * @fw_domains: forcewake domains to get reference on
     411             :  *
     412             :  * This function can be used get GT's forcewake domain references.
     413             :  * Normal register access will handle the forcewake domains automatically.
     414             :  * However if some sequence requires the GT to not power down a particular
     415             :  * forcewake domains this function should be called at the beginning of the
     416             :  * sequence. And subsequently the reference should be dropped by symmetric
     417             :  * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
     418             :  * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
     419             :  */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* Platforms without explicit forcewake need nothing here. */
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	/* Forcewake requires the device to be awake. */
	WARN_ON(dev_priv->pm.suspended);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
     434             : 
     435             : /**
     436             :  * intel_uncore_forcewake_get__locked - grab forcewake domain references
     437             :  * @dev_priv: i915 device instance
     438             :  * @fw_domains: forcewake domains to get reference on
     439             :  *
     440             :  * See intel_uncore_forcewake_get(). This variant places the onus
     441             :  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
     442             :  */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	/* Caller must already hold uncore.lock (see kernel-doc above). */
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
     453             : 
/*
 * Drop a reference on each requested forcewake domain.  Must be called
 * with uncore.lock held.  The hardware release is deferred: when a
 * refcount would reach zero it is bumped back to 1 and the domain's
 * timer is armed, so intel_uncore_fw_release_timer() performs the
 * actual put shortly afterwards.
 */
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Ignore domains this platform does not have. */
	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		/* Last reference: keep the domain awake until the timer
		 * fires and releases it. */
		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}
     476             : 
     477             : /**
     478             :  * intel_uncore_forcewake_put - release a forcewake domain reference
     479             :  * @dev_priv: i915 device instance
     480             :  * @fw_domains: forcewake domains to put references
     481             :  *
     482             :  * This function drops the device-level forcewakes for specified
     483             :  * domains obtained by intel_uncore_forcewake_get().
     484             :  */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* Platforms without explicit forcewake need nothing here. */
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
     497             : 
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	/* Caller must already hold uncore.lock. */
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
     516             : 
/* Warn if any forcewake domain still holds a reference (debug check). */
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}
     528             : 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

/* True if reg lies in the half-open range [start, end). */
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

/* Valleyview: MMIO ranges served by the render forcewake domain. */
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

/* Valleyview: MMIO ranges served by the media forcewake domain. */
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

/* Cherryview: render-domain MMIO ranges. */
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

/* Cherryview: media-domain MMIO ranges. */
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

/* Cherryview: ranges needing both render AND media domains. */
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

/* Gen9: uncore range — excluded from forcewake (see SKL_NEEDS_FORCE_WAKE). */
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00,  0x2000)

/* Gen9: render-domain MMIO ranges. */
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

/* Gen9: media-domain MMIO ranges. */
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

/* Gen9: ranges needing both render AND media domains. */
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

/* Gen9: everything else below 0x40000 falls to the blitter domain. */
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 &&\
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
     599             : 
/*
 * ilk_dummy_write - wake Ironlake from RC6 before a real MMIO access.
 *
 * WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
 * the chip from rc6 before touching it for real. MI_MODE is masked,
 * hence harmless to write 0 into.
 */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
     608             : 
     609             : static void
     610           0 : hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
     611             :                         bool before)
     612             : {
     613           0 :         const char *op = read ? "reading" : "writing to";
     614           0 :         const char *when = before ? "before" : "after";
     615             : 
     616           0 :         if (!i915.mmio_debug)
     617           0 :                 return;
     618             : 
     619           0 :         if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
     620           0 :                 WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
     621             :                      when, op, reg);
     622           0 :                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
     623           0 :                 i915.mmio_debug--; /* Only report the first N failures */
     624           0 :         }
     625           0 : }
     626             : 
/*
 * One-shot detection of unclaimed register accesses while mmio_debug is
 * off.  On the first hit, clear the sticky FPGA_DBG bit and enable
 * i915.mmio_debug so the next offender is reported through
 * hsw_unclaimed_reg_debug().  Never re-arms itself (mmio_debug_once).
 */
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		/* true converts to 1: grants one round of debug reporting. */
		i915.mmio_debug = mmio_debug_once;
		mmio_debug_once = false;
	}
}
     644             : 
/*
 * Pre-gen6 MMIO read accessors: no forcewake and no locking needed,
 * just a suspend assertion plus register tracing.
 */
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

/* gen2-gen4: plain raw MMIO read. */
#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

/* gen5 (Ironlake): same, but kick the chip out of RC6 first. */
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

/* Instantiate the 8/16/32/64-bit variants. */
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
     684             : 
/*
 * Gen6+ reads must serialise against forcewake bookkeeping, so the
 * access runs under uncore.lock with interrupts disabled.
 */
#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
     695             : 
/*
 * __force_wake_get - take forcewake references for @fw_domains
 *
 * For each requested domain: if it already holds a reference it is
 * masked out of the set; otherwise a reference is taken and the
 * deferred-release timer armed.  Any domains left in the set are then
 * woken with a single force_wake_get call.  Called with uncore.lock
 * held (see GEN6_READ_HEADER / GEN6_WRITE_HEADER).
 */
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
                                    enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (WARN_ON(!fw_domains))
                return;

        /* Ideally GCC would be constant-fold and eliminate this loop */
        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count) {
                        /* Already awake: nothing to request from hw. */
                        fw_domains &= ~(1 << id);
                        continue;
                }

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
     719             : 
/* vGPU guest: locked raw read, no forcewake handling in the guest path. */
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* gen6/gen7: render forcewake for all registers below 0x40000. */
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

/* Valleyview: pick the render or media domain by register range. */
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* Cherryview: render, media, or both domains by register range. */
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

/* Gen9: forcewake needed below 0x40000 except in the uncore range. */
#define SKL_NEEDS_FORCE_WAKE(reg) \
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

/*
 * Gen9: map the register to its owning domain(s); registers that need
 * forcewake but match no render/media/common range fall back to the
 * blitter domain.
 */
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}
     792             : 
/* Instantiate 8/16/32/64-bit readers for each platform flavour. */
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
     821             : 
/*
 * Pre-gen6 MMIO write accessors: no forcewake and no locking needed,
 * just tracing and a suspend assertion.
 */
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

/* gen2-gen4: plain raw MMIO write. */
#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

/* gen5 (Ironlake): same, but kick the chip out of RC6 first. */
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

/* Instantiate the 8/16/32/64-bit variants. */
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
     859             : 
/* Gen6+ writes run under uncore.lock, like the reads. */
#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

/*
 * gen6/gen7: registers below 0x40000 go through the GT FIFO — wait for
 * free entries first, and check for FIFO errors afterwards if the wait
 * reported a problem.
 */
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

/* Haswell: like gen6, plus unclaimed-register debug/detect around it. */
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

/* vGPU guest: locked raw write, no forcewake handling in the guest path. */
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
     909             : 
/*
 * Registers treated as shadowed on gen8: writes to them skip forcewake
 * (see __gen8_write / __chv_write via is_gen8_shadowed()).
 */
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};
     920             : 
     921           0 : static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
     922             : {
     923             :         int i;
     924           0 :         for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
     925           0 :                 if (reg == gen8_shadowed_regs[i])
     926           0 :                         return true;
     927             : 
     928           0 :         return false;
     929           0 : }
     930             : 
/*
 * gen8: writes below 0x40000 need render forcewake unless the register
 * is shadowed.
 */
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

/*
 * Cherryview: non-shadowed writes wake the render, media, or both
 * domains depending on the register range.
 */
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
     960             : 
/*
 * Registers treated as shadowed on gen9: writes to them skip forcewake
 * (see __gen9_write via is_gen9_shadowed()).
 */
static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};
     973             : 
     974           0 : static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
     975             : {
     976             :         int i;
     977           0 :         for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
     978           0 :                 if (reg == gen9_shadowed_regs[i])
     979           0 :                         return true;
     980             : 
     981           0 :         return false;
     982           0 : }
     983             : 
/*
 * gen9: route the write to its owning forcewake domain(s).  Shadowed
 * registers and the uncore range need none; anything else below 0x40000
 * that matches no render/media/common range uses the blitter domain.
 */
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}
    1009             : 
/* Instantiate 8/16/32/64-bit writers for each platform flavour. */
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
    1043             : 
/* Install the x##_write8/16/32/64 accessors as the uncore write vfuncs. */
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

/* Install the x##_read8/16/32/64 accessors as the uncore read vfuncs. */
#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
    1059             : 
    1060             : 
/*
 * fw_domain_init - set up one forcewake domain
 * @dev_priv: i915 device private
 * @domain_id: which domain (render/blitter/media)
 * @reg_set: register written to request/release forcewake
 * @reg_ack: register holding the hardware acknowledgement
 *
 * Fills in the per-domain register values (gen6 uses plain values,
 * later gens use masked-bit writes), registers the deferred release
 * timer, marks the domain present, and resets it to the idle state.
 */
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	/* Re-initialising a domain that is still awake would be a bug. */
	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		/* Gen6 takes plain (unmasked) forcewake values. */
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	/*
	 * NOTE(review): reg_post looks like a posting-read register used
	 * after set/clear writes — confirm at the use sites.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
    1105             : 
/*
 * Select the forcewake get/put callbacks and register the forcewake
 * domains (render/blitter/media) appropriate to this GPU generation.
 * Gen5 and earlier have no forcewake, so nothing is set up for them.
 */
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No forcewake hardware before gen6. */
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		/* Gen9 splits forcewake into three independent domains. */
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * CHV also matches IS_VALLEYVIEW here; the inner
		 * IS_CHERRYVIEW test picks the put variant — only VLV
		 * proper needs the FIFO-aware put.
		 */
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		/* HSW/BDW: single render domain, MT-style forcewake. */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		/* Tentatively assume MT forcewake works ... */
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		/* ... then probe ECOBUS under a real get/put cycle to
		 * confirm; struct_mutex serializes against other users.
		 */
		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		/* MT not enabled by the BIOS: fall back to the gen6
		 * single-threaded forcewake register.
		 */
		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
    1199             : 
/*
 * One-time uncore setup: detect vGPU/eLLC, initialise the forcewake
 * domains, sanitize hardware state, and install the per-generation
 * MMIO read/write vfunc tables.
 */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	/* Note: unknown (future) gens deliberately fall through to the
	 * gen9 accessors via the default label.
	 */
	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		/* CHV has its own accessors; other gen8 parts share the
		 * gen8 write / gen6 read paths.
		 */
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	/* Running under a hypervisor overrides the accessors entirely. */
	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
    1259             : #undef ASSIGN_WRITE_MMIO_VFUNCS
    1260             : #undef ASSIGN_READ_MMIO_VFUNCS
    1261             : 
/* Tear down the uncore on driver unload. */
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
    1268             : 
/* Build a contiguous bitmask of supported gens, low gen to high. */
#define GEN_RANGE(l, h) GENMASK(h, l)

/*
 * Registers that userspace may read via i915_reg_read_ioctl(), with
 * the access size (bytes) and the generations on which each is valid.
 */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
    1279             : 
    1280           0 : int i915_reg_read_ioctl(struct drm_device *dev,
    1281             :                         void *data, struct drm_file *file)
    1282             : {
    1283           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1284           0 :         struct drm_i915_reg_read *reg = data;
    1285             :         struct register_whitelist const *entry = whitelist;
    1286             :         unsigned size;
    1287             :         u64 offset;
    1288             :         int i, ret = 0;
    1289             : 
    1290           0 :         for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
    1291           0 :                 if (entry->offset == (reg->offset & -entry->size) &&
    1292           0 :                     (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
    1293             :                         break;
    1294             :         }
    1295             : 
    1296           0 :         if (i == ARRAY_SIZE(whitelist))
    1297           0 :                 return -EINVAL;
    1298             : 
    1299             :         /* We use the low bits to encode extra flags as the register should
    1300             :          * be naturally aligned (and those that are not so aligned merely
    1301             :          * limit the available flags for that register).
    1302             :          */
    1303           0 :         offset = entry->offset;
    1304           0 :         size = entry->size;
    1305           0 :         size |= reg->offset ^ offset;
    1306             : 
    1307           0 :         intel_runtime_pm_get(dev_priv);
    1308             : 
    1309           0 :         switch (size) {
    1310             :         case 8 | 1:
    1311           0 :                 reg->val = I915_READ64_2x32(offset, offset+4);
    1312           0 :                 break;
    1313             :         case 8:
    1314           0 :                 reg->val = I915_READ64(offset);
    1315           0 :                 break;
    1316             :         case 4:
    1317           0 :                 reg->val = I915_READ(offset);
    1318           0 :                 break;
    1319             :         case 2:
    1320           0 :                 reg->val = I915_READ16(offset);
    1321           0 :                 break;
    1322             :         case 1:
    1323           0 :                 reg->val = I915_READ8(offset);
    1324           0 :                 break;
    1325             :         default:
    1326             :                 ret = -EINVAL;
    1327           0 :                 goto out;
    1328             :         }
    1329             : 
    1330             : out:
    1331           0 :         intel_runtime_pm_put(dev_priv);
    1332           0 :         return ret;
    1333           0 : }
    1334             : 
/*
 * Userspace ioctl: report GPU reset statistics for a given context.
 * The global reset count is only disclosed to CAP_SYS_ADMIN; per-context
 * batch_active/batch_pending counts are always returned.
 *
 * Returns 0 on success, -EINVAL on bad flags/pad, -EPERM if an
 * unprivileged caller queries the default context, or the error from
 * taking struct_mutex / looking up the context.
 */
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	/* Reject any unknown flags/padding for forward compatibility. */
	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		/* Drop the mutex before propagating the lookup error. */
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
    1373             : 
    1374           0 : static int i915_reset_complete(struct drm_device *dev)
    1375             : {
    1376           0 :         u8 gdrst;
    1377           0 :         pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
    1378           0 :         return (gdrst & GRDOM_RESET_STATUS) == 0;
    1379           0 : }
    1380             : 
/*
 * Gen3/4 reset: pulse GRDOM_RESET_ENABLE in PCI config space, then
 * wait (up to 500 ms) for the status bit to clear.
 */
static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}
    1390             : 
    1391           0 : static int g4x_reset_complete(struct drm_device *dev)
    1392             : {
    1393           0 :         u8 gdrst;
    1394           0 :         pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
    1395           0 :         return (gdrst & GRDOM_RESET_ENABLE) == 0;
    1396           0 : }
    1397             : 
/* G33 reset: assert GRDOM_RESET_ENABLE and wait for it to self-clear. */
static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}
    1403             : 
/*
 * G4x reset: reset the render and media domains separately, toggling
 * VCP unit clock gating around the media reset per the ctg/elk
 * workaround.  Returns 0 on success or the wait_for() timeout error.
 */
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Render domain first. */
	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret =  wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret =  wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	/* Restore VCP clock gating now that the media reset is done. */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}
    1433             : 
/*
 * Gen5 (Ironlake) reset via the ILK_GDSR MMIO register: reset the
 * render domain, then the media domain, waiting up to 500 ms for each
 * enable bit to clear before finally zeroing the register.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
    1457             : 
/*
 * Gen6+ full-chip reset via GEN6_GDRST.  Uses raw accessors since the
 * register lives outside the gt power well; forcewake state is reset
 * afterwards because the GPU reset clobbers it.
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* Hardware forcewake state is lost across reset; resynchronize. */
	intel_uncore_forcewake_reset(dev, true);

	return ret;
}
    1478             : 
/*
 * Poll @reg until (value & @mask) == @value or @timeout_ms elapses.
 * Thin wrapper around wait_for(); returns 0 on success, else the
 * wait_for() timeout error.
 */
static int wait_for_register(struct drm_i915_private *dev_priv,
			     const u32 reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}
    1487             : 
/*
 * Gen8 reset: request a per-engine reset handshake on every ring and
 * wait for each to report ready, then perform the gen6-style full
 * reset.  If any engine fails to become ready, withdraw the request
 * from all engines and return -EIO.
 */
static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		/* 700 ms per engine for the ready handshake. */
		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	/* Roll back the reset request on every engine before bailing. */
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}
    1517             : 
    1518           0 : static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
    1519             : {
    1520           0 :         if (!i915.reset)
    1521           0 :                 return NULL;
    1522             : 
    1523           0 :         if (INTEL_INFO(dev)->gen >= 8)
    1524           0 :                 return gen8_do_reset;
    1525           0 :         else if (INTEL_INFO(dev)->gen >= 6)
    1526           0 :                 return gen6_do_reset;
    1527           0 :         else if (IS_GEN5(dev))
    1528           0 :                 return ironlake_do_reset;
    1529           0 :         else if (IS_G4X(dev))
    1530           0 :                 return g4x_do_reset;
    1531           0 :         else if (IS_G33(dev))
    1532           0 :                 return g33_do_reset;
    1533           0 :         else if (INTEL_INFO(dev)->gen >= 3)
    1534           0 :                 return i915_do_reset;
    1535             :         else
    1536           0 :                 return NULL;
    1537           0 : }
    1538             : 
    1539           0 : int intel_gpu_reset(struct drm_device *dev)
    1540             : {
    1541           0 :         struct drm_i915_private *dev_priv = to_i915(dev);
    1542             :         int (*reset)(struct drm_device *);
    1543             :         int ret;
    1544             : 
    1545           0 :         reset = intel_get_gpu_reset(dev);
    1546           0 :         if (reset == NULL)
    1547           0 :                 return -ENODEV;
    1548             : 
    1549             :         /* If the power well sleeps during the reset, the reset
    1550             :          * request may be dropped and never completes (causing -EIO).
    1551             :          */
    1552           0 :         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
    1553           0 :         ret = reset(dev);
    1554           0 :         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
    1555             : 
    1556           0 :         return ret;
    1557           0 : }
    1558             : 
    1559           0 : bool intel_has_gpu_reset(struct drm_device *dev)
    1560             : {
    1561           0 :         return intel_get_gpu_reset(dev) != NULL;
    1562             : }
    1563             : 
    1564           0 : void intel_uncore_check_errors(struct drm_device *dev)
    1565             : {
    1566           0 :         struct drm_i915_private *dev_priv = dev->dev_private;
    1567             : 
    1568           0 :         if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
    1569           0 :             (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
    1570             :                 DRM_DEBUG("Unclaimed register before interrupt\n");
    1571           0 :                 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
    1572           0 :         }
    1573           0 : }

Generated by: LCOV version 1.13