LCOV - code coverage report
Current view: top level - dev/pci/drm/i915 - i915_drv.h (source / functions) Hit Total Coverage
Test: 6.4 Lines: 0 151 0.0 %
Date: 2018-10-19 03:25:38 Functions: 0 45 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /* $OpenBSD: i915_drv.h,v 1.80 2018/06/25 22:29:16 kettenis Exp $ */
       2             : /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
       3             :  */
       4             : /*
       5             :  *
       6             :  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
       7             :  * All Rights Reserved.
       8             :  *
       9             :  * Permission is hereby granted, free of charge, to any person obtaining a
      10             :  * copy of this software and associated documentation files (the
      11             :  * "Software"), to deal in the Software without restriction, including
      12             :  * without limitation the rights to use, copy, modify, merge, publish,
      13             :  * distribute, sub license, and/or sell copies of the Software, and to
      14             :  * permit persons to whom the Software is furnished to do so, subject to
      15             :  * the following conditions:
      16             :  *
      17             :  * The above copyright notice and this permission notice (including the
      18             :  * next paragraph) shall be included in all copies or substantial portions
      19             :  * of the Software.
      20             :  *
      21             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      22             :  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      23             :  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
      24             :  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
      25             :  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
      26             :  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
      27             :  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
      28             :  *
      29             :  */
      30             : 
      31             : #ifndef _I915_DRV_H_
      32             : #define _I915_DRV_H_
      33             : 
      34             : #include <dev/pci/drm/i915_drm.h>
      35             : #include <dev/pci/drm/drm_fourcc.h>
      36             : 
      37             : #include "i915_reg.h"
      38             : #include "intel_bios.h"
      39             : #include "intel_ringbuffer.h"
      40             : #include "intel_lrc.h"
      41             : #include "i915_gem_gtt.h"
      42             : #include "i915_gem_render_state.h"
      43             : #include "intel_guc.h"
      44             : 
      45             : struct sg_table;
      46             : 
      47             : #define CONFIG_DRM_I915_FBDEV 1
      48             : #define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1
      49             : 
      50             : #include "acpi.h"
      51             : #if NACPI > 0
      52             : #define CONFIG_ACPI
      53             : #endif
      54             : 
      55             : #include "drm.h"
      56             : #include "vga.h"
      57             : 
      58             : #include <dev/ic/mc6845reg.h>
      59             : #include <dev/ic/pcdisplayvar.h>
      60             : #include <dev/ic/vgareg.h>
      61             : #include <dev/ic/vgavar.h>
      62             : 
      63             : #include <sys/task.h>
      64             : #include <dev/pci/vga_pcivar.h>
      65             : #include <dev/wscons/wsconsio.h>
      66             : #include <dev/wscons/wsdisplayvar.h>
      67             : #include <dev/rasops/rasops.h>
      68             : 
      69             : extern int intel_enable_gtt(void);
      70             : extern void intel_gtt_chipset_flush(void);
      71             : extern int intel_gmch_probe(struct pci_dev *, struct pci_dev *, void *);
      72             : extern void intel_gtt_get(u64 *, size_t *, phys_addr_t *, u64 *);
      73             : extern void intel_gtt_insert_sg_entries(struct sg_table *, unsigned int,
      74             :                                         unsigned int);
      75             : extern void intel_gtt_clear_range(unsigned int, unsigned int);
      76             : extern void intel_gmch_remove(void);
      77             : 
      78             : #ifdef __i386__
      79             : 
      80             : static inline u_int64_t
      81             : bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
      82             : {
      83             :         u_int64_t lo, hi;
      84             : 
      85             :         lo = bus_space_read_4(t, h, o);
      86             :         hi = bus_space_read_4(t, h, o + 4);
      87             :         return (lo | (hi << 32));
      88             : }
      89             : 
      90             : static inline void
      91             : bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
      92             :     u_int64_t v)
      93             : {
      94             :         bus_space_write_4(t, h, o, v);
      95             :         bus_space_write_4(t, h, o + 4, v >> 32);
      96             : }
      97             : 
      98             : #endif
      99             : 
     100             : /*
     101             :  * The Bridge device's PCI config space has information about the
     102             :  * fb aperture size and the amount of pre-reserved memory.
     103             :  * This is all handled in the intel-gtt.ko module. i915.ko only
 * cares about the vga bit for the vga arbiter.
     105             :  */
     106             : #define INTEL_GMCH_CTRL         0x52
     107             : #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
     108             : #define SNB_GMCH_CTRL           0x50
     109             : #define    SNB_GMCH_GGMS_SHIFT  8 /* GTT Graphics Memory Size */
     110             : #define    SNB_GMCH_GGMS_MASK   0x3
     111             : #define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
     112             : #define    SNB_GMCH_GMS_MASK    0x1f
     113             : #define    BDW_GMCH_GGMS_SHIFT  6
     114             : #define    BDW_GMCH_GGMS_MASK   0x3
     115             : #define    BDW_GMCH_GMS_SHIFT   8
     116             : #define    BDW_GMCH_GMS_MASK    0xff
     117             : 
     118             : #define I830_GMCH_CTRL                  0x52
     119             : 
     120             : #define I830_GMCH_GMS_MASK              0x70
     121             : #define I830_GMCH_GMS_LOCAL             0x10
     122             : #define I830_GMCH_GMS_STOLEN_512        0x20
     123             : #define I830_GMCH_GMS_STOLEN_1024       0x30
     124             : #define I830_GMCH_GMS_STOLEN_8192       0x40
     125             : 
     126             : #define I855_GMCH_GMS_MASK              0xF0
     127             : #define I855_GMCH_GMS_STOLEN_0M         0x0
     128             : #define I855_GMCH_GMS_STOLEN_1M         (0x1 << 4)
     129             : #define I855_GMCH_GMS_STOLEN_4M         (0x2 << 4)
     130             : #define I855_GMCH_GMS_STOLEN_8M         (0x3 << 4)
     131             : #define I855_GMCH_GMS_STOLEN_16M        (0x4 << 4)
     132             : #define I855_GMCH_GMS_STOLEN_32M        (0x5 << 4)
     133             : #define I915_GMCH_GMS_STOLEN_48M        (0x6 << 4)
     134             : #define I915_GMCH_GMS_STOLEN_64M        (0x7 << 4)
     135             : #define G33_GMCH_GMS_STOLEN_128M        (0x8 << 4)
     136             : #define G33_GMCH_GMS_STOLEN_256M        (0x9 << 4)
     137             : #define INTEL_GMCH_GMS_STOLEN_96M       (0xa << 4)
     138             : #define INTEL_GMCH_GMS_STOLEN_160M      (0xb << 4)
     139             : #define INTEL_GMCH_GMS_STOLEN_224M      (0xc << 4)
     140             : #define INTEL_GMCH_GMS_STOLEN_352M      (0xd << 4)
     141             : 
     142             : #define I830_DRB3               0x63
     143             : #define I85X_DRB3               0x43
     144             : #define I865_TOUD               0xc4
     145             : 
     146             : #define I830_ESMRAMC            0x91
     147             : #define I845_ESMRAMC            0x9e
     148             : #define I85X_ESMRAMC            0x61
     149             : #define    TSEG_ENABLE          (1 << 0)
     150             : #define    I830_TSEG_SIZE_512K  (0 << 1)
     151             : #define    I830_TSEG_SIZE_1M    (1 << 1)
     152             : #define    I845_TSEG_SIZE_MASK  (3 << 1)
     153             : #define    I845_TSEG_SIZE_512K  (2 << 1)
     154             : #define    I845_TSEG_SIZE_1M    (3 << 1)
     155             : 
/*
 * GTT state shared between the GMCH probe code and the rest of the
 * driver (filled in via intel_gtt_get() and friends declared above).
 */
struct intel_gtt {
        /* Size of memory reserved for graphics by the BIOS */
        unsigned int stolen_size;
        /* Total number of gtt entries. */
        unsigned int gtt_total_entries;
        /* Part of the gtt that is mappable by the cpu, for those chips where
         * this is not the full gtt. */
        unsigned int gtt_mappable_entries;
        /* Share the scratch page dma with ppgtts. */
        bus_addr_t scratch_page_dma;
        struct drm_dmamem *scratch_page;        /* backing for the scratch page */
        /* for ppgtt PDE access */
        bus_space_handle_t gtt;
        /* needed for ioremap in drm/i915 */
        bus_addr_t gma_bus_addr;
};
     172             : 
     173             : /* General customization:
     174             :  */
     175             : 
     176             : #define DRIVER_NAME             "i915"
     177             : #define DRIVER_DESC             "Intel Graphics"
     178             : #define DRIVER_DATE             "20151010"
     179             : 
     180             : #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
     181             :                              (long) (x), __func__);
     182             : 
     183             : #define I915_STATE_WARN(condition, format...) WARN(condition, format)
     184             : #define I915_STATE_WARN_ON(condition) WARN_ON(condition)
     185             : 
     186           0 : static inline const char *yesno(bool v)
     187             : {
     188           0 :         return v ? "yes" : "no";
     189             : }
     190             : 
/*
 * Display pipe ids.  _PIPE_EDP is an extra bookkeeping slot after the
 * real pipes; I915_MAX_PIPES aliases it, so the eDP slot is deliberately
 * excluded from the pipe count.
 */
enum pipe {
        INVALID_PIPE = -1,
        PIPE_A = 0,
        PIPE_B,
        PIPE_C,
        _PIPE_EDP,
        I915_MAX_PIPES = _PIPE_EDP
};
/* 'A'.. letter for a pipe index, for debug/log output. */
#define pipe_name(p) ((p) + 'A')
     200             : 
/* Display transcoders; the eDP transcoder has its own slot after A-C. */
enum transcoder {
        TRANSCODER_A = 0,
        TRANSCODER_B,
        TRANSCODER_C,
        TRANSCODER_EDP,
        I915_MAX_TRANSCODERS
};
/* 'A'.. letter for a transcoder index (TRANSCODER_EDP prints as 'D'). */
#define transcoder_name(t) ((t) + 'A')
     209             : 
     210             : /*
     211             :  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
     212             :  * number of planes per CRTC.  Not all platforms really have this many planes,
     213             :  * which means some arrays of size I915_MAX_PLANES may have unused entries
     214             :  * between the topmost sprite plane and the cursor plane.
     215             :  */
/* Per-CRTC plane ids; the cursor plane sits above the sprite planes. */
enum plane {
        PLANE_A = 0,
        PLANE_B,
        PLANE_C,
        PLANE_CURSOR,
        I915_MAX_PLANES,
};
/* 'A'.. letter for a plane index, for debug/log output. */
#define plane_name(p) ((p) + 'A')

/*
 * 'A'.. letter for sprite (s) on pipe (p).  NOTE(review): relies on a
 * `dev` variable being in scope at the use site, and the letters only
 * line up across pipes when every pipe has the same sprite count.
 */
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
     226             : 
/* Display output port ids (DDI/DP/HDMI port letters). */
enum port {
        PORT_A = 0,
        PORT_B,
        PORT_C,
        PORT_D,
        PORT_E,
        I915_MAX_PORTS
};
/* 'A'.. letter for a port index, for debug/log output. */
#define port_name(p) ((p) + 'A')
     236             : 
/* Number of DPIO PHYs on VLV-class hardware. */
#define I915_NUM_PHYS_VLV 2

/* DPIO channel within a PHY. */
enum dpio_channel {
        DPIO_CH0,
        DPIO_CH1
};

/* DPIO PHY instance. */
enum dpio_phy {
        DPIO_PHY0,
        DPIO_PHY1
};
     248             : 
/*
 * Power domains that display resources may depend on.  The enum values
 * double as bit positions in power-domain masks (for_each_power_domain
 * below tests `1 << domain` against a mask).
 */
enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A,
        POWER_DOMAIN_PIPE_B,
        POWER_DOMAIN_PIPE_C,
        POWER_DOMAIN_PIPE_A_PANEL_FITTER,
        POWER_DOMAIN_PIPE_B_PANEL_FITTER,
        POWER_DOMAIN_PIPE_C_PANEL_FITTER,
        POWER_DOMAIN_TRANSCODER_A,
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
        POWER_DOMAIN_PORT_DDI_A_2_LANES,
        POWER_DOMAIN_PORT_DDI_A_4_LANES,
        POWER_DOMAIN_PORT_DDI_B_2_LANES,
        POWER_DOMAIN_PORT_DDI_B_4_LANES,
        POWER_DOMAIN_PORT_DDI_C_2_LANES,
        POWER_DOMAIN_PORT_DDI_C_4_LANES,
        POWER_DOMAIN_PORT_DDI_D_2_LANES,
        POWER_DOMAIN_PORT_DDI_D_4_LANES,
        POWER_DOMAIN_PORT_DDI_E_2_LANES,
        POWER_DOMAIN_PORT_DSI,
        POWER_DOMAIN_PORT_CRT,
        POWER_DOMAIN_PORT_OTHER,
        POWER_DOMAIN_VGA,
        POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_PLLS,
        POWER_DOMAIN_AUX_A,
        POWER_DOMAIN_AUX_B,
        POWER_DOMAIN_AUX_C,
        POWER_DOMAIN_AUX_D,
        POWER_DOMAIN_GMBUS,
        POWER_DOMAIN_INIT,

        POWER_DOMAIN_NUM,
};

/* Map a pipe/panel-fitter/transcoder index to its power domain.  These
 * rely on the per-pipe/per-transcoder enumerators above being
 * consecutive; TRANSCODER_EDP is the one special case. */
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
                ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
        ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
         (tran) + POWER_DOMAIN_TRANSCODER_A)
     291             : 
/* Hotplug-detect pins.  HPD_TV aliases HPD_NONE, i.e. no real pin. */
enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
        HPD_CRT,
        HPD_SDVO_B,
        HPD_SDVO_C,
        HPD_PORT_A,
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
        HPD_NUM_PINS
};

/* Iterate over the real pins, skipping HPD_NONE. */
#define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
     308             : 
/*
 * Hotplug-detect bookkeeping: per-pin interrupt statistics (presumably
 * for HPD IRQ-storm detection — see the irq code), deferred work items,
 * and masks of ports that reported long/short digital-port pulses.
 */
struct i915_hotplug {
        struct work_struct hotplug_work;        /* main hotplug service work */

        struct {
                unsigned long last_jiffies;     /* start of the current detect window */
                int count;                      /* IRQs seen in that window */
                enum {
                        HPD_ENABLED = 0,
                        HPD_DISABLED = 1,
                        HPD_MARK_DISABLED = 2
                } state;
        } stats[HPD_NUM_PINS];
        u32 event_bits;                         /* pins with pending events */
        struct delayed_work reenable_work;      /* re-enables disabled pins later */

        struct intel_digital_port *irq_port[I915_MAX_PORTS];
        u32 long_port_mask;                     /* ports with a long HPD pulse */
        u32 short_port_mask;                    /* ports with a short HPD pulse */
        struct work_struct dig_port_work;

        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
         * the non-DP HPD could block the workqueue on a mode config
         * mutex getting, that userspace may have taken. However
         * userspace is waiting on the DP workqueue to run which is
         * blocked behind the non-DP one.
         */
        struct workqueue_struct *dp_wq;
};
     338             : 
     339             : #define I915_GEM_GPU_DOMAINS \
     340             :         (I915_GEM_DOMAIN_RENDER | \
     341             :          I915_GEM_DOMAIN_SAMPLER | \
     342             :          I915_GEM_DOMAIN_COMMAND | \
     343             :          I915_GEM_DOMAIN_INSTRUCTION | \
     344             :          I915_GEM_DOMAIN_VERTEX)
     345             : 
     346             : #define for_each_pipe(__dev_priv, __p) \
     347             :         for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
     348             : #define for_each_plane(__dev_priv, __pipe, __p)                         \
     349             :         for ((__p) = 0;                                                 \
     350             :              (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;   \
     351             :              (__p)++)
     352             : #define for_each_sprite(__dev_priv, __p, __s)                           \
     353             :         for ((__s) = 0;                                                 \
     354             :              (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];  \
     355             :              (__s)++)
     356             : 
     357             : #define for_each_crtc(dev, crtc) \
     358             :         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
     359             : 
     360             : #define for_each_intel_plane(dev, intel_plane) \
     361             :         list_for_each_entry(intel_plane,                        \
     362             :                             &dev->mode_config.plane_list,        \
     363             :                             base.head)
     364             : 
     365             : #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)      \
     366             :         list_for_each_entry(intel_plane,                                \
     367             :                             &(dev)->mode_config.plane_list,              \
     368             :                             base.head)                                  \
     369             :                 if ((intel_plane)->pipe == (intel_crtc)->pipe)
     370             : 
     371             : #define for_each_intel_crtc(dev, intel_crtc) \
     372             :         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
     373             : 
     374             : #define for_each_intel_encoder(dev, intel_encoder)              \
     375             :         list_for_each_entry(intel_encoder,                      \
     376             :                             &(dev)->mode_config.encoder_list,    \
     377             :                             base.head)
     378             : 
     379             : #define for_each_intel_connector(dev, intel_connector)          \
     380             :         list_for_each_entry(intel_connector,                    \
     381             :                             &dev->mode_config.connector_list,    \
     382             :                             base.head)
     383             : 
     384             : #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
     385             :         list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
     386             :                 if ((intel_encoder)->base.crtc == (__crtc))
     387             : 
     388             : #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
     389             :         list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
     390             :                 if ((intel_connector)->base.encoder == (__encoder))
     391             : 
     392             : #define for_each_power_domain(domain, mask)                             \
     393             :         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)  \
     394             :                 if ((1 << (domain)) & (mask))
     395             : 
     396             : struct inteldrm_softc;
     397             : #define drm_i915_private inteldrm_softc
     398             : struct i915_mm_struct;
     399             : struct i915_mmu_object;
     400             : 
/*
 * Per-open-file (per-client) driver state: the client's outstanding
 * request list used for throttling, its GEM context idr, RPS boost
 * accounting, and the BSD ring assigned to it.
 */
struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;      /* owning device */
        struct drm_file *file;                  /* back-pointer to the DRM file */

        struct {
                spinlock_t lock;                /* protects request_list */
                struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
        } mm;
        struct idr context_idr;                 /* this client's GEM contexts */

        struct intel_rps_client {
                struct list_head link;
                unsigned boosts;                /* number of waitboosts granted */
        } rps;

        struct intel_engine_cs *bsd_ring;       /* BSD ring chosen for this client */
};
     424             : 
/*
 * Shared-DPLL identifiers.  The numeric values are reused across
 * platform families (PCH, HSW/BDW, SKL), so the same id names a
 * different PLL depending on the hardware generation.
 */
enum intel_dpll_id {
        DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
        /* real shared dpll ids must be >= 0 */
        DPLL_ID_PCH_PLL_A = 0,
        DPLL_ID_PCH_PLL_B = 1,
        /* hsw/bdw */
        DPLL_ID_WRPLL1 = 0,
        DPLL_ID_WRPLL2 = 1,
        DPLL_ID_SPLL = 2,

        /* skl */
        DPLL_ID_SKL_DPLL1 = 0,
        DPLL_ID_SKL_DPLL2 = 1,
        DPLL_ID_SKL_DPLL3 = 2,
};
/* Upper bound on shared DPLLs across the platform families above. */
#define I915_NUM_PLLS 3
     441             : 
/*
 * Register-level DPLL state, with per-platform-family member groups;
 * only the group matching the running hardware is meaningful.
 */
struct intel_dpll_hw_state {
        /* i9xx, pch plls */
        uint32_t dpll;
        uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;

        /* hsw, bdw */
        uint32_t wrpll;
        uint32_t spll;

        /* skl */
        /*
         * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
         * lower part of ctrl1 and they get shifted into position when writing
         * the register.  This allows us to easily compare the state to share
         * the DPLL.
         */
        uint32_t ctrl1;
        /* HDMI only, 0 when used for DP */
        uint32_t cfgcr1, cfgcr2;

        /* bxt */
        uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
                 pcsdw12;
};
     468             : 
/* Committed/desired configuration for one shared DPLL. */
struct intel_shared_dpll_config {
        unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
        struct intel_dpll_hw_state hw_state;
};
     473             : 
/*
 * A display PLL that can be shared by multiple CRTCs.  The function
 * pointers abstract the per-platform programming sequence; get_hw_state
 * reads the PLL's current hardware state into *hw_state and reports
 * whether it is enabled.
 */
struct intel_shared_dpll {
        struct intel_shared_dpll_config config;

        int active; /* count of number of active CRTCs (i.e. DPMS on) */
        bool on; /* is the PLL actually active? Disabled during modeset */
        const char *name;
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        /* The mode_set hook is optional and should be used together with the
         * intel_prepare_shared_dpll function. */
        void (*mode_set)(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
                       struct intel_shared_dpll *pll);
        void (*disable)(struct drm_i915_private *dev_priv,
                        struct intel_shared_dpll *pll);
        bool (*get_hw_state)(struct drm_i915_private *dev_priv,
                             struct intel_shared_dpll *pll,
                             struct intel_dpll_hw_state *hw_state);
};
     494             : 
     495             : #define SKL_DPLL0 0
     496             : #define SKL_DPLL1 1
     497             : #define SKL_DPLL2 2
     498             : #define SKL_DPLL3 3
     499             : 
/* Used by dp and fdi links */
struct intel_link_m_n {
        uint32_t        tu;     /* presumably the DP transfer unit size — confirm */
        uint32_t        gmch_m; /* data M/N pair */
        uint32_t        gmch_n;
        uint32_t        link_m; /* link M/N pair */
        uint32_t        link_n;
};
     508             : 
     509             : void intel_link_compute_m_n(int bpp, int nlanes,
     510             :                             int pixel_clock, int link_clock,
     511             :                             struct intel_link_m_n *m_n);
     512             : 
     513             : /* Interface history:
     514             :  *
     515             :  * 1.1: Original.
     516             :  * 1.2: Add Power Management
     517             :  * 1.3: Add vblank support
     518             :  * 1.4: Fix cmdbuffer path, add heap destroy
     519             :  * 1.5: Add vblank pipe configuration
     520             :  * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
     521             :  *      - Support vertical blank on secondary display pipe
     522             :  */
     523             : #define DRIVER_MAJOR            1
     524             : #define DRIVER_MINOR            6
     525             : #define DRIVER_PATCHLEVEL       0
     526             : 
     527             : #define WATCH_LISTS     0
     528             : 
     529             : struct opregion_header;
     530             : struct opregion_acpi;
     531             : struct opregion_swsci;
     532             : struct opregion_asle;
     533             : 
/*
 * Cached pointers into the ACPI OpRegion shared with the system BIOS:
 * header, the ACPI/SWSCI/ASLE mailboxes, the VBT, and the lid state.
 */
struct intel_opregion {
        struct opregion_header *header;
        struct opregion_acpi *acpi;
        struct opregion_swsci *swsci;
        u32 swsci_gbda_sub_functions;   /* supported SWSCI GBDA sub-functions */
        u32 swsci_sbcb_sub_functions;   /* supported SWSCI SBCB sub-functions */
        struct opregion_asle *asle;
        void *vbt;                      /* Video BIOS Table */
        u32 *lid_state;
        struct work_struct asle_work;   /* deferred ASLE request handling */
};
/* Total OpRegion size in bytes. */
#define OPREGION_SIZE            (8*1024)
     546             : 
     547             : struct intel_overlay;
     548             : struct intel_overlay_error_state;
     549             : 
     550             : #define I915_FENCE_REG_NONE -1
     551             : #define I915_MAX_NUM_FENCES 32
     552             : /* 32 fences + sign bit for FENCE_REG_NONE */
     553             : #define I915_MAX_NUM_FENCE_BITS 6
     554             : 
/* Software state tracked for one hardware fence register. */
struct drm_i915_fence_reg {
        struct list_head lru_list;      /* position on the fence LRU */
        struct drm_i915_gem_object *obj; /* object currently using this fence */
        int pin_count;                  /* presumably nonzero blocks stealing — confirm */
};
     560             : 
/* Wiring description for one SDVO device: DVO port, i2c/ddc pins and
 * slave address (valid only when `initialized` is set). */
struct sdvo_device_mapping {
        u8 initialized;
        u8 dvo_port;
        u8 slave_addr;
        u8 dvo_wiring;
        u8 i2c_pin;
        u8 ddc_pin;
};
     569             : 
struct intel_display_error_state;

/*
 * Snapshot of GPU, ring, and buffer-object state captured when a GPU
 * error/hang is detected.  Reference counted (@ref) so the capture can
 * outlive the interrupt context that created it.
 */
struct drm_i915_error_state {
        struct kref ref;
        struct timeval time;            /* when the capture was taken */

        char error_msg[128];
        int iommu;
        u32 reset_count;
        u32 suspend_count;

        /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
        u32 gtier[4];
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
        u32 fault_data0; /* gen8, gen9 */
        u32 fault_data1; /* gen8, gen9 */
        u32 done_reg;
        u32 gac_eco;
        u32 gam_ecochk;
        u32 gab_ctl;
        u32 gfx_mode;
        u32 extra_instdone[I915_NUM_INSTDONE_REG];
        u64 fence[I915_MAX_NUM_FENCES];
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
        struct drm_i915_error_object *semaphore_obj;

        /* Per-ring state, one entry per hardware ring. */
        struct drm_i915_error_ring {
                bool valid;             /* entry was actually captured */
                /* Software tracked state */
                bool waiting;
                int hangcheck_score;
                enum intel_ring_hangcheck_action hangcheck_action;
                int num_requests;       /* length of the requests[] array */

                /* our own tracking of ring head and tail */
                u32 cpu_ring_head;
                u32 cpu_ring_tail;

                u32 semaphore_seqno[I915_NUM_RINGS - 1];

                /* Register state */
                u32 start;
                u32 tail;
                u32 head;
                u32 ctl;
                u32 hws;
                u32 ipeir;
                u32 ipehr;
                u32 instdone;
                u32 bbstate;
                u32 instpm;
                u32 instps;
                u32 seqno;
                u64 bbaddr;
                u64 acthd;
                u32 fault_reg;
                u64 faddr;
                u32 rc_psmi; /* sleep state */
                u32 semaphore_mboxes[I915_NUM_RINGS - 1];

                /*
                 * Copy of a buffer object's pages; pages[] is a
                 * flexible tail of page_count page-sized chunks.
                 */
                struct drm_i915_error_object {
                        int page_count;
                        u64 gtt_offset;
                        u32 *pages[0];
                } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

                /* Outstanding requests at capture time (num_requests). */
                struct drm_i915_error_request {
                        long jiffies;
                        u32 seqno;
                        u32 tail;
                } *requests;

                /* Per-ring address-space registers. */
                struct {
                        u32 gfx_mode;
                        union {
                                u64 pdp[4];
                                u32 pp_dir_base;
                        };
                } vm_info;

                /* Process blamed for the hang on this ring. */
                pid_t pid;
                char comm[TASK_COMM_LEN];
        } ring[I915_NUM_RINGS];

        /* Per-VM lists of buffer objects (vm_count VMs). */
        struct drm_i915_error_buffer {
                u32 size;
                u32 name;
                u32 rseqno[I915_NUM_RINGS], wseqno;
                u64 gtt_offset;
                u32 read_domains;
                u32 write_domain;
                s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
                s32 pinned:2;
                u32 tiling:2;
                u32 dirty:1;
                u32 purgeable:1;
                u32 userptr:1;
                s32 ring:4;
                u32 cache_level:3;
        } **active_bo, **pinned_bo;

        u32 *active_bo_count, *pinned_bo_count;
        u32 vm_count;
};
     682             : 
struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

/*
 * Vtable of display callbacks so generation-specific implementations
 * can be selected behind one interface.  NOTE(review): presumably
 * populated once at driver init — confirm against the modeset init
 * code, which is not in this header.
 */
struct drm_i915_display_funcs {
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        /**
         * find_dpll() - Find the best values for the PLL
         * @limit: limits for the PLL
         * @crtc: current CRTC
         * @target: target frequency in kHz
         * @refclk: reference clock frequency in kHz
         * @match_clock: if provided, @best_clock P divider must
         *               match the P divider from @match_clock
         *               used for LVDS downclocking
         * @best_clock: best PLL values found
         *
         * Returns true on success, false on failure.
         */
        bool (*find_dpll)(const struct intel_limit *limit,
                          struct intel_crtc_state *crtc_state,
                          int target, int refclk,
                          struct dpll *match_clock,
                          struct dpll *best_clock);
        /* Watermark programming hooks. */
        void (*update_wm)(struct drm_crtc *crtc);
        void (*update_sprite_wm)(struct drm_plane *plane,
                                 struct drm_crtc *crtc,
                                 uint32_t sprite_width, uint32_t sprite_height,
                                 int pixel_size, bool enable, bool scaled);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_state *);
        void (*get_initial_plane_config)(struct intel_crtc *,
                                         struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
        void (*crtc_enable)(struct drm_crtc *crtc);
        void (*crtc_disable)(struct drm_crtc *crtc);
        void (*audio_codec_enable)(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode);
        void (*audio_codec_disable)(struct intel_encoder *encoder);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        /* Queue an asynchronous page flip of @fb on @crtc. */
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *req,
                          uint32_t flags);
        void (*update_primary_plane)(struct drm_crtc *crtc,
                                     struct drm_framebuffer *fb,
                                     int x, int y);
        void (*hpd_irq_setup)(struct drm_device *dev);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
};
     750             : 
/*
 * Index of each forcewake domain; also the bit position used to build
 * the enum forcewake_domains mask below.
 */
enum forcewake_domain_id {
        FW_DOMAIN_ID_RENDER = 0,
        FW_DOMAIN_ID_BLITTER,
        FW_DOMAIN_ID_MEDIA,

        FW_DOMAIN_ID_COUNT      /* number of domains; not a real id */
};
     758             : 
/* Bitmask form of forcewake_domain_id, for requesting several domains. */
enum forcewake_domains {
        FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
        FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
        FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
        FORCEWAKE_ALL = (FORCEWAKE_RENDER |
                         FORCEWAKE_BLITTER |
                         FORCEWAKE_MEDIA)
};
     767             : 
/*
 * Platform-specific register-access callbacks: forcewake get/put plus
 * raw MMIO read/write at each access width.  @trace controls whether
 * the access is traced (NOTE(review): exact tracing semantics live in
 * the implementations, not this header).
 */
struct intel_uncore_funcs {
        void (*force_wake_get)(struct drm_i915_private *dev_priv,
                                                        enum forcewake_domains domains);
        void (*force_wake_put)(struct drm_i915_private *dev_priv,
                                                        enum forcewake_domains domains);

        uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

        void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
                                uint8_t val, bool trace);
        void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
                                uint16_t val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
                                uint32_t val, bool trace);
        void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
                                uint64_t val, bool trace);
};
     788             : 
/*
 * Uncore (register access / forcewake) state.  fw_domain[] is indexed
 * by enum forcewake_domain_id; fw_domains is the mask of domains this
 * platform actually implements.
 */
struct intel_uncore {
        spinlock_t lock; /** lock is also taken in irq contexts. */

        struct intel_uncore_funcs funcs;

        unsigned fifo_count;
        enum forcewake_domains fw_domains;      /* domains present on this HW */

        /* Per-domain wake bookkeeping and the registers used to drive it. */
        struct intel_uncore_forcewake_domain {
                struct drm_i915_private *i915;
                enum forcewake_domain_id id;
                unsigned wake_count;    /* nested get/put reference count */
                struct timeout timer;
                u32 reg_set;            /* register written to wake the domain */
                u32 val_set;
                u32 val_clear;
                u32 reg_ack;            /* register polled for the HW ack */
                u32 reg_post;
                u32 val_reset;
        } fw_domain[FW_DOMAIN_ID_COUNT];
};
     810             : 
/* Iterate over initialised fw domains */
/*
 * Walks every slot of uncore.fw_domain[], but the trailing if() skips
 * slots whose bit is not in both @mask__ and the set of domains the
 * hardware implements (uncore.fw_domains).  @i__ is the loop index,
 * @domain__ points at the current entry.
 */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
        for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
             (i__) < FW_DOMAIN_ID_COUNT; \
             (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
                if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

/* Convenience wrapper: iterate all implemented domains. */
#define for_each_fw_domain(domain__, dev_priv__, i__) \
        for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
     820             : 
/* Load status of the CSR/DMC firmware. */
enum csr_state {
        FW_UNINITIALIZED = 0,
        FW_LOADED,
        FW_FAILED
};

/*
 * CSR/DMC firmware image plus the MMIO address/value pairs
 * (mmioaddr[]/mmiodata[], mmio_count entries) needed to program it.
 */
struct intel_csr {
        const char *fw_path;
        uint32_t *dmc_payload;          /* firmware payload to upload */
        uint32_t dmc_fw_size;
        uint32_t mmio_count;            /* valid entries in the arrays below */
        uint32_t mmioaddr[8];
        uint32_t mmiodata[8];
        enum csr_state state;
};
     836             : 
/*
 * X-macro listing every boolean capability flag of a device.  @func is
 * applied to each flag name, @sep is emitted between entries — used
 * below with DEFINE_FLAG/SEP_SEMICOLON to declare one u8 bitfield per
 * flag inside struct intel_device_info.
 */
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
        func(is_mobile) sep \
        func(is_i85x) sep \
        func(is_i915g) sep \
        func(is_i945gm) sep \
        func(is_g33) sep \
        func(need_gfx_hws) sep \
        func(is_g4x) sep \
        func(is_pineview) sep \
        func(is_broadwater) sep \
        func(is_crestline) sep \
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
        func(is_skylake) sep \
        func(is_broxton) sep \
        func(is_kabylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
        func(has_pipe_cxsr) sep \
        func(has_hotplug) sep \
        func(cursor_needs_physical) sep \
        func(has_overlay) sep \
        func(overlay_needs_physical) sep \
        func(supports_tv) sep \
        func(has_llc) sep \
        func(has_ddi) sep \
        func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

/*
 * Static, per-device capability description: PCI device id, hardware
 * generation, pipe/ring counts, capability flag bitfields, display
 * register offsets and slice/subslice/EU topology.
 */
struct intel_device_info {
        u32 display_mmio_offset;
        u16 device_id;
        u8 num_pipes:3;
        u8 num_sprites[I915_MAX_PIPES];
        u8 gen;                         /* hardware generation number */
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
        int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];

        /* Slice/subslice/EU info */
        u8 slice_total;
        u8 subslice_total;
        u8 subslice_per_slice;
        u8 eu_total;
        u8 eu_per_subslice;
        /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
        u8 subslice_7eu[3];
        u8 has_slice_pg:1;
        u8 has_subslice_pg:1;
        u8 has_eu_pg:1;
};

/* The X-macro helpers are only needed for the struct above. */
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
     898             : 
/* GPU cacheability level for a GEM object. */
enum i915_cache_level {
        I915_CACHE_NONE = 0,
        I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
        I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
                              caches, eg sampler/render caches, and the
                              large Last-Level-Cache. LLC is coherent with
                              the CPU, but L3 is only visible to the GPU. */
        I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
     908             : 
/*
 * Per-context GPU hang accounting, used to ban contexts that keep
 * causing resets.
 */
struct i915_ctx_hang_stats {
        /* This context had batch pending when hang was declared */
        unsigned batch_pending;

        /* This context had batch active when hang was declared */
        unsigned batch_active;

        /* Time when this context was last blamed for a GPU reset */
        unsigned long guilty_ts;

        /* If the contexts causes a second GPU hang within this time,
         * it is permanently banned from submitting any more work.
         */
        unsigned long ban_period_seconds;

        /* This context is banned to submit more work */
        bool banned;
};
     927             : 
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

#define CONTEXT_NO_ZEROMAP (1<<0)
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @i915: backpointer to the owning device instance.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *             context).
 * @hang_stats: information about the role of this context in possible GPU
 *              hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *                initialized (legacy ring submission mechanism only).
 * @engine: per-ring execlists state (backing object, ringbuffer, pin count).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
        struct kref ref;
        int user_handle;
        uint8_t remap_slice;
        struct drm_i915_private *i915;
        int flags;
        struct drm_i915_file_private *file_priv;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_hw_ppgtt *ppgtt;

        /* Legacy ring buffer submission */
        struct {
                struct drm_i915_gem_object *rcs_state;
                bool initialized;
        } legacy_hw_ctx;

        /* Execlists */
        struct {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
                int pin_count;
        } engine[I915_NUM_RINGS];

        struct list_head link;
};
     976             : 
/*
 * Origin of a frontbuffer operation (which path touched the
 * frontbuffer) — NOTE(review): consumers of this enum are outside this
 * header; confirm exact usage in the frontbuffer-tracking code.
 */
enum fb_op_origin {
        ORIGIN_GTT,
        ORIGIN_CPU,
        ORIGIN_CS,
        ORIGIN_FLIP,
        ORIGIN_DIRTYFB,
};
     984             : 
/*
 * Framebuffer compression (FBC) state: compressed-buffer placement,
 * enable/disable bookkeeping, the reason FBC is currently off, and the
 * platform-specific enable/disable callbacks.
 */
struct i915_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
        struct rwlock lock;
        unsigned long uncompressed_size;
        unsigned threshold;
        unsigned int fb_id;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
        struct intel_crtc *crtc;        /* crtc FBC is tracking */
        int y;

        /* Stolen-memory nodes holding the compressed framebuffer data. */
        struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;

        bool false_color;

        /* Tracks whether the HW is actually enabled, not whether the feature is
         * possible. */
        bool enabled;

        /* Deferred enable work; NULL when no enable is pending. */
        struct intel_fbc_work {
                struct delayed_work work;
                struct intel_crtc *crtc;
                struct drm_framebuffer *fb;
        } *fbc_work;

        /* Why FBC is currently disabled (FBC_OK when active). */
        enum no_fbc_reason {
                FBC_OK, /* FBC is enabled */
                FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
                FBC_NO_OUTPUT, /* no outputs enabled to compress */
                FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
                FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
                FBC_MODE_TOO_LARGE, /* mode too large for compression */
                FBC_BAD_PLANE, /* fbc not supported on plane */
                FBC_NOT_TILED, /* buffer not tiled */
                FBC_MULTIPLE_PIPES, /* more than one pipe active */
                FBC_MODULE_PARAM,
                FBC_CHIP_DEFAULT, /* disabled by default on this chip */
                FBC_ROTATION, /* rotation is not supported */
                FBC_IN_DBG_MASTER, /* kernel debugger is active */
                FBC_BAD_STRIDE, /* stride is not supported */
                FBC_PIXEL_RATE, /* pixel rate is too big */
                FBC_PIXEL_FORMAT /* pixel format is invalid */
        } no_fbc_reason;

        /* Platform-specific hooks. */
        bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
        void (*enable_fbc)(struct intel_crtc *crtc);
        void (*disable_fbc)(struct drm_i915_private *dev_priv);
};
    1035             : 
/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
        DRRS_HIGH_RR,
        DRRS_LOW_RR,
        DRRS_MAX_RR, /* RR count */
};

/* Level of DRRS (dynamic refresh rate) support reported by the panel. */
enum drrs_support_type {
        DRRS_NOT_SUPPORTED = 0,
        STATIC_DRRS_SUPPORT = 1,
        SEAMLESS_DRRS_SUPPORT = 2
};
    1052             : 
struct intel_dp;
/*
 * DRRS (dynamic refresh rate switching) state for the eDP panel,
 * protected by @mutex.
 */
struct i915_drrs {
        struct rwlock mutex;
        struct delayed_work work;
        struct intel_dp *dp;            /* the DP link being throttled */
        unsigned busy_frontbuffer_bits;
        enum drrs_refresh_rate_type refresh_rate_type;
        enum drrs_support_type type;
};
    1062             : 
/* Panel Self Refresh (PSR) state, protected by @lock. */
struct i915_psr {
        struct rwlock lock;
        bool sink_support;              /* panel advertises PSR */
        bool source_ok;                 /* source side can do PSR */
        struct intel_dp *enabled;       /* DP link PSR is enabled on, if any */
        bool active;
        struct delayed_work work;
        unsigned busy_frontbuffer_bits;
        bool psr2_support;
        bool aux_frame_sync;
};
    1074             : 
/* Type of PCH (platform controller hub) paired with the GPU. */
enum intel_pch {
        PCH_NONE = 0,   /* No PCH present */
        PCH_IBX,        /* Ibexpeak PCH */
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
        PCH_SPT,        /* Sunrisepoint PCH */
        PCH_KBP,        /* Kabypoint PCH */
        PCH_NOP,
};

/* Target of a sideband interface (SBI) access. */
enum intel_sbi_destination {
        SBI_ICLK,
        SBI_MPHY,
};
    1089             : 
/*
 * Per-device quirk flags — NOTE(review): presumably OR'ed into a
 * quirks bitmask in the private device struct; confirm at the quirk
 * table, which is outside this header.
 */
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
    1096             : 
struct intel_fbdev;
struct intel_fbc_work;

/*
 * State for one GMBUS (hardware i2c) pin pair, embedding the generic
 * i2c adapter plus a bit-banging fallback (@bit_algo, used via
 * @force_bit).
 */
struct intel_gmbus {
        struct i2c_adapter adapter;
        u32 force_bit;                  /* nonzero: use bit-banging fallback */
        u32 reg0;
        u32 gpio_reg;
        struct i2c_algo_bit_data bit_algo;
        struct drm_i915_private *dev_priv;
};
    1108             : 
/*
 * Register values saved on suspend and restored on resume.  Field
 * names mirror the register names they shadow (save<REG>).
 */
struct i915_suspend_saved_registers {
        u32 saveDSPARB;
        u32 saveLVDS;
        u32 savePP_ON_DELAYS;
        u32 savePP_OFF_DELAYS;
        u32 savePP_ON;
        u32 savePP_OFF;
        u32 savePP_CONTROL;
        u32 savePP_DIVISOR;
        u32 saveFBC_CONTROL;
        u32 saveCACHE_MODE_0;
        u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
        uint64_t saveFENCE[I915_MAX_NUM_FENCES];
        u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
};
    1128             : 
/*
 * Valleyview register snapshot grouped by hardware unit.  Presumably
 * saved on entry to and restored on exit from S0ix (name-derived -
 * confirm at the save/restore call sites).
 */
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};
    1190             : 
/*
 * Snapshot of RPS counters used for the manual "wa residency"
 * calculation (see intel_gen6_power_mgmt.ei).  Field roles inferred
 * from the names - CZ clock plus render/media busyness (C0) counters;
 * confirm against the sampling code.
 */
struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};
    1196             : 
/*
 * GEN6+ RPS/RC6 dynamic frequency management state (see the hw_lock
 * comment below for the RPS/RC6 locking rules).
 */
struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;		/* last reclock step - TODO confirm units/sign convention */
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;	/* current reclocking policy */

	spinlock_t client_lock;		/* protects the clients list below */
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work delayed_resume_work;
	unsigned boosts;

	struct intel_rps_client semaphores, mmioflips;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct rwlock hw_lock;
};
    1253             : 
    1254             : /* defined intel_pm.c */
    1255             : extern spinlock_t mchdev_lock;
    1256             : 
/*
 * Ironlake-era power management bookkeeping.  The last_count*/last_time*
 * pairs look like sampling state for chipset/graphics power estimation
 * (NOTE(review): inferred from names - verify in intel_pm.c users, cf.
 * the mchdev_lock declaration above).
 */
struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};
    1275             : 
    1276             : struct drm_i915_private;
    1277             : struct i915_power_well;
    1278             : 
/*
 * Per-power-well operation table; one implementation per well type.
 * All hooks receive the device and the well being operated on.
 */
struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
    1305             : 
/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;		/* well can never be powered down */
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;	/* bitmask of power domains served by this well */
	unsigned long data;	/* ops-specific payload - meaning depends on *ops; confirm per well type */
	const struct i915_power_well_ops *ops;	/* see i915_power_well_ops above */
};
    1318             : 
/* Device-wide power domain state: refcounts per domain plus the wells. */
struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;	/* number of entries in power_wells */

	struct rwlock lock;	/* protects the refcounts and well state */
	int domain_use_count[POWER_DOMAIN_NUM];	/* per-domain reference counts */
	struct i915_power_well *power_wells;	/* array of power_well_count wells */
};
    1332             : 
#define MAX_L3_SLICES 2
/* L3 cache parity-error handling state, one remap table per slice. */
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];	/* per-slice remap data - presumably NULL when absent; confirm at alloc site */
	struct work_struct error_work;	/* deferred parity-error processing */
	int which_slice;		/* slice the pending error_work targets - TODO confirm */
};
    1339             : 
/* GEM memory-management state for the whole device. */
struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct rwlock stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

#ifdef __linux__
	/* Linux-only memory-pressure hooks; unused in this OpenBSD port. */
	struct notifier_block oom_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;
#endif

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests?  Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;	/* protects the two counters below */
	size_t object_memory;
	u32 object_count;
};
    1417             : 
/*
 * Growable text buffer used when formatting a captured GPU error state
 * (buf holds up to size bytes, bytes of which are in use; err latches
 * the first failure - NOTE(review): roles inferred from names, confirm
 * against the error-state printing code).
 */
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};
    1427             : 
/* Pairs a device with a captured error state for a reader/consumer. */
struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
    1432             : 
/* GPU hangcheck, reset and error-state capture bookkeeping. */
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * means that reset is in progress and even values that
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG     1
#define I915_WEDGED                     (1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
#define I915_STOP_RING_ALLOW_WARN      (1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset   */
	bool reload_in_reset;
};
    1495             : 
/*
 * Modeset-restore state machine across suspend/lid events - presumably:
 * restore pending on lid open, restore done, or suspended (confirm at
 * the state transitions).
 */
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};
    1501             : 
/*
 * VBT encodings for alternate DP AUX channels and HDMI DDC pins -
 * presumably matched against ddi_vbt_port_info.alternate_aux_channel /
 * .alternate_ddc_pin below; confirm against the VBT parsing code.
 */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06
    1510             : 
/* Per-DDI-port capabilities and tuning values parsed from the VBT. */
struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN        0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;	/* see DP_AUX_* encodings above */
	uint8_t alternate_ddc_pin;	/* see DDC_PIN_* encodings above */

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};
    1530             : 
/* PSR (panel self refresh) wake-line counts, as specified by the VBT. */
enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};
    1537             : 
/* Data parsed out of the Video BIOS Table (VBT). */
struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;	/* panel power sequencing delays */

	/* PSR (panel self refresh) parameters */
	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	/* Backlight PWM parameters */
	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;      /* min_brightness/255 of max */
	} backlight;

#ifndef __linux__
	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;
#endif

	int crt_ddc_pin;

	int child_dev_num;			/* number of entries in child_dev */
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
    1603             : 
/* Display data buffer (DDB) split between planes and sprites. */
enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};
    1608             : 
/* Watermark values for one level: primary, sprite, cursor and FBC. */
struct intel_wm_level {
	bool enable;		/* this level is usable */
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};
    1616             : 
/* Ironlake-style hardware watermark register values (per pipe and LP). */
struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;	/* see enum above */
};
    1625             : 
/* Valleyview per-pipe watermarks: primary plane, two sprites, cursor. */
struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};
    1631             : 
/* Valleyview self-refresh watermarks. */
struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};
    1636             : 
/* Complete Valleyview watermark state: per-pipe, self-refresh and DDL. */
struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];		/* drain latency values per pipe - TODO confirm */
	uint8_t level;
	bool cxsr;		/* maximum self-refresh (CxSR) enabled - presumed from name */
};
    1648             : 
/* One contiguous display data buffer (DDB) allocation. */
struct skl_ddb_entry {
	uint16_t start, end;    /* in number of blocks, 'end' is exclusive */
};
    1652             : 
    1653           0 : static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
    1654             : {
    1655           0 :         return entry->end - entry->start;
    1656             : }
    1657             : 
    1658           0 : static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
    1659             :                                        const struct skl_ddb_entry *e2)
    1660             : {
    1661           0 :         if (e1->start == e2->start && e1->end == e2->end)
    1662           0 :                 return true;
    1663             : 
    1664           0 :         return false;
    1665           0 : }
    1666             : 
/* Full Skylake DDB layout: per-pipe span plus per-plane allocations. */
struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
    1672             : 
/* Skylake watermark register values plus the DDB allocation they assume. */
struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];	/* per-pipe: values need writing to hw - TODO confirm */
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
    1680             : 
/* Per-plane watermark values for a single Skylake watermark level. */
struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	uint16_t plane_res_b[I915_MAX_PLANES];	/* result blocks - presumed from the _b suffix */
	uint8_t plane_res_l[I915_MAX_PLANES];	/* result lines - presumed from the _l suffix */
};
    1686             : 
/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable), but
 * it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read the Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;		/* device is runtime-suspended (PCI D3, see above) */
	bool irqs_enabled;	/* mirrors the IRQ enable state described above */
};
    1714             : 
/* Selectable sources for pipe CRC capture (display testing/debug). */
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};
    1729             : 
/* One captured CRC sample: frame counter plus the five CRC result regs. */
struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};
    1734             : 
#define INTEL_PIPE_CRC_ENTRIES_NR       128
/* Per-pipe CRC capture ring buffer state. */
struct intel_pipe_crc {
        spinlock_t lock;        /* NOTE(review): presumably guards head/tail/entries */
        bool opened;            /* exclusive access to the result file */
        struct intel_pipe_crc_entry *entries;
        enum intel_pipe_crc_source source;
        int head, tail;         /* ring-buffer indices into entries[] */
        wait_queue_head_t wq;   /* readers wait here for new samples */
};
    1744             : 
/* Frontbuffer flush coalescing state; lock presumably guards the bitmasks. */
struct i915_frontbuffer_tracking {
        struct rwlock lock;

        /*
         * Tracking bits for delayed frontbuffer flushing due to gpu activity or
         * scheduled flips.
         */
        unsigned busy_bits;
        unsigned flip_bits;
};
    1755             : 
/* A single workaround register write: apply (value & mask) at addr. */
struct i915_wa_reg {
        u32 addr;
        u32 value;
        /* bitmask representing WA bits */
        u32 mask;
};
    1762             : 
#define I915_MAX_WA_REGS 16

/* Fixed-size table of hardware workaround register writes; count entries used. */
struct i915_workarounds {
        struct i915_wa_reg reg[I915_MAX_WA_REGS];
        u32 count;
};
    1769             : 
/* Virtual GPU (e.g. running under a hypervisor/vGPU) detection state. */
struct i915_virtual_gpu {
        bool active;
};
    1773             : 
/*
 * Bundle of parameters handed to the execbuffer submission backend
 * (see the gt.execbuf_submit hook in inteldrm_softc).
 */
struct i915_execbuffer_params {
        struct drm_device               *dev;
        struct drm_file                 *file;
        uint32_t                        dispatch_flags;
        uint32_t                        args_batch_start_offset;
        uint64_t                        batch_obj_vm_offset;
        struct intel_engine_cs          *ring;  /* engine to submit on */
        struct drm_i915_gem_object      *batch_obj;
        struct intel_context            *ctx;
        struct drm_i915_gem_request     *request;
};
    1785             : 
/*
 * Per-device driver state ("softc" in OpenBSD terms).  This is the OpenBSD
 * port's equivalent of Linux's struct drm_i915_private: the first group of
 * members is OpenBSD bus/autoconf/framebuffer glue, the remainder mirrors
 * the upstream i915 private structure.
 */
struct inteldrm_softc {
        struct device sc_dev;           /* OpenBSD autoconf device glue */
        bus_dma_tag_t dmat;             /* bus DMA tag */
        bus_space_tag_t bst;            /* bus space tag */
        struct agp_map *agph;
        bus_space_handle_t opregion_ioh;

        struct drm_device *dev;
        /* allocation pools backing GEM objects, vmas and requests */
        struct pool objects;
        struct pool vmas;
        struct pool requests;

        const struct intel_device_info info;

        int relative_constants_mode;

        /* PCI attachment and interrupt state */
        pci_chipset_tag_t pc;
        pcitag_t tag;
        struct extent *memex;
        pci_intr_handle_t ih;
        void *irqh;

        struct vga_pci_bar bar;
        struct vga_pci_bar *regs;       /* MMIO register BAR */

        /* wsdisplay/console integration */
        int nscreens;
        void (*switchcb)(void *, int, int);
        void *switchcbarg;
        void *switchcookie;
        struct task switchtask;
        struct rasops_info ro;

        /* screen blanking task state */
        struct task burner_task;
        int burner_fblank;

        struct backlight_device *backlight;

        struct intel_uncore uncore;

        struct intel_guc guc;

        struct intel_csr csr;

        /* Display CSR-related protection */
        struct rwlock csr_lock;

        struct intel_gmbus gmbus[GMBUS_NUM_PINS];

        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        struct rwlock gmbus_mutex;

        /**
         * Base address of the gmbus and gpio block.
         */
        uint32_t gpio_mmio_base;

        /* MMIO base address for MIPI regs */
        uint32_t mipi_mmio_base;

        wait_queue_head_t gmbus_wait_queue;

        struct pci_dev *bridge_dev;
        struct intel_engine_cs ring[I915_NUM_RINGS];
        struct drm_i915_gem_object *semaphore_obj;
        uint32_t last_seqno, next_seqno;

        struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
        /* chipset flush page mapping; layout differs between i8xx and i9xx */
        union flush {
                struct {
                        bus_space_tag_t         bst;
                        bus_space_handle_t      bsh;
                } i9xx;
                struct {
                        bus_dma_segment_t       seg;
                        caddr_t                 kva;
                } i8xx;
        }                        ifp;
        struct vm_page *pgs;

        /* protects the irq masks */
        spinlock_t irq_lock;

        /* protects the mmio flip data */
        spinlock_t mmio_flip_lock;

        bool display_irqs_enabled;

#ifdef noyet
        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
        struct pm_qos_request pm_qos;
#endif

        /* Sideband mailbox protection */
        struct rwlock sb_lock;

        /** Cached value of IMR to avoid reads in updating the bitfield */
        union {
                u32 irq_mask;
                u32 de_irq_mask[I915_MAX_PIPES];
        };
        u32 gt_irq_mask;
        u32 pm_irq_mask;
        u32 pm_rps_events;
        u32 pipestat_irq_mask[I915_MAX_PIPES];

        struct i915_hotplug hotplug;
        struct i915_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;

        bool preserve_bios_swizzle;

        /* overlay */
        struct intel_overlay *overlay;

        /* backlight registers and fields in struct intel_panel */
        struct rwlock backlight_lock;

        /* LVDS info */
        bool no_aux_handshake;

        /* protects panel power sequencer state */
        struct rwlock pps_mutex;

        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */

        /* clock frequencies; NOTE(review): units vary, confirm at use sites */
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_boot_cdclk;
        unsigned int cdclk_freq, max_cdclk_freq;
        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
        unsigned int czclk_freq;

        /**
         * wq - Driver workqueue for GEM.
         *
         * NOTE: Work items scheduled here are not allowed to grab any modeset
         * locks, for otherwise the flushing done in the pageflip code will
         * result in deadlocks.
         */
        struct workqueue_struct *wq;

        /* Display functions */
        struct drm_i915_display_funcs display;

        /* PCH chipset type */
        enum intel_pch pch_type;
        unsigned short pch_id;

        unsigned long quirks;

        enum modeset_restore modeset_restore;
        struct rwlock modeset_restore_lock;

        struct list_head vm_list; /* Global list of all address spaces */
        struct i915_gtt gtt; /* VM representing the global address space */

        struct i915_gem_mm mm;
        DECLARE_HASHTABLE(mm_structs, 7);
        struct rwlock mm_lock;

        /* Kernel Modesetting */

        struct sdvo_device_mapping sdvo_mappings[2];

        struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
        struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
        wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
        struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

        struct i915_workarounds workarounds;

        /* Reclocking support */
        bool render_reclock_avail;

        struct i915_frontbuffer_tracking fb_tracking;

        u16 orig_clock;

        bool mchbar_need_disable;

        struct intel_l3_parity l3_parity;

        /* Cannot be determined by PCIID. You must always read a register. */
        size_t ellc_size;

        /* gen6+ rps state */
        struct intel_gen6_power_mgmt rps;

        /* ilk-only ips/rps state. Everything in here is protected by the global
         * mchdev_lock in intel_pm.c */
        struct intel_ilk_power_mgmt ips;

        struct i915_power_domains power_domains;

        struct i915_psr psr;

        struct i915_gpu_error gpu_error;

        struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
        /* list of fbdev registered on this device */
        struct intel_fbdev *fbdev;
        struct work_struct fbdev_suspend_work;
#endif

        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;

        /* hda/i915 audio component */
        struct i915_audio_component *audio_component;
        bool audio_component_registered;
        /**
         * av_mutex - mutex for audio/video sync
         *
         */
        struct rwlock av_mutex;

        uint32_t hw_context_size;
        struct list_head context_list;

        u32 fdi_rx_config;

        u32 chv_phy_control;

        u32 suspend_count;
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state vlv_s0ix_state;

        /* watermark state */
        struct {
                /*
                 * Raw watermark latency values:
                 * in 0.1us units for WM0,
                 * in 0.5us units for WM1+.
                 */
                /* primary */
                uint16_t pri_latency[5];
                /* sprite */
                uint16_t spr_latency[5];
                /* cursor */
                uint16_t cur_latency[5];
                /*
                 * Raw watermark memory latency values
                 * for SKL for all 8 levels
                 * in 1us units.
                 */
                uint16_t skl_latency[8];

                /*
                 * The skl_wm_values structure is a bit too big for stack
                 * allocation, so we keep the staging struct where we store
                 * intermediate results here instead.
                 */
                struct skl_wm_values skl_results;

                /* current hardware state */
                union {
                        struct ilk_wm_values hw;
                        struct skl_wm_values skl_hw;
                        struct vlv_wm_values vlv;
                };

                uint8_t max_level;
        } wm;

        struct i915_runtime_pm pm;

        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                int (*execbuf_submit)(struct i915_execbuffer_params *params,
                                      struct drm_i915_gem_execbuffer2 *args,
                                      struct list_head *vmas);
                int (*init_rings)(struct drm_device *dev);
                void (*cleanup_ring)(struct intel_engine_cs *ring);
                void (*stop_ring)(struct intel_engine_cs *ring);
        } gt;

        bool edp_low_vswing;

        /* perform PHY state sanity checks? */
        bool chv_phy_assert[2];

        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
         */
};
    2085             : 
    2086           0 : static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    2087             : {
    2088           0 :         return dev->dev_private;
    2089             : }
    2090             : 
#ifdef __linux__
/*
 * Linux-only: map a driver-model struct device to its i915 private state
 * via the drvdata pointer.  Not compiled on OpenBSD.
 */
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
        return to_i915(dev_get_drvdata(dev));
}
#endif
    2097             : 
    2098             : static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
    2099             : {
    2100             :         return container_of(guc, struct drm_i915_private, guc);
    2101             : }
    2102             : 
/*
 * Iterate over initialised rings: walks all I915_NUM_RINGS slots of
 * dev_priv__->ring[], binding each to ring__, but executes the loop body
 * only when intel_ring_initialized() reports the slot as initialised
 * (uninitialised slots are skipped, the loop is not terminated).
 */
#define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
                if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
    2107             : 
/* Requested HDMI audio policy (force off/on, or follow the EDID). */
enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
        HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
        HDMI_AUDIO_AUTO,                /* trust EDID */
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
};

/* Sentinel meaning "object has no GTT offset" (all bits set). */
#define I915_GTT_OFFSET_NONE ((u32)-1)
    2116             : 
/* Backing-storage operations for a GEM object (per object type). */
struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages before to binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *);
        /* called when the object is exported as a dma-buf */
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        /* final per-type cleanup when the object is destroyed */
        void (*release)(struct drm_i915_gem_object *);
};
    2136             : 
    2137             : /*
    2138             :  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
    2139             :  * considered to be the frontbuffer for the given plane interface-wise. This
    2140             :  * doesn't mean that the hw necessarily already scans it out, but that any
    2141             :  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
    2142             :  *
    2143             :  * We have one bit per pipe and per scanout plane type.
    2144             :  */
    2145             : #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
    2146             : #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
    2147             : #define INTEL_FRONTBUFFER_BITS \
    2148             :         (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
    2149             : #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
    2150             :         (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
    2151             : #define INTEL_FRONTBUFFER_CURSOR(pipe) \
    2152             :         (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
    2153             : #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
    2154             :         (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
    2155             : #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
    2156             :         (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
    2157             : #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
    2158             :         (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
    2159             : 
/* An i915 GEM buffer object, embedding the generic DRM GEM object. */
struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        /** List of VMAs backed by this object */
        struct list_head vma_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;

        /* per-ring links while the object has outstanding GPU activity */
        struct list_head ring_list[I915_NUM_RINGS];
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;

        struct list_head batch_pool_link;

        /**
         * This is set if the object is on the active lists (has pending
         * rendering and so a non-zero seqno), and is not set if it is on
         * inactive (ready to be unbound) list.
         */
        unsigned int active:I915_NUM_RINGS;

        /**
         * This is set if the object has been written to since last bound
         * to the GTT
         */
        unsigned int dirty:1;

        /**
         * Fence register bits (if any) for this object.  Will be set
         * as needed when mapped into the GTT.
         * Protected by dev->struct_mutex.
         */
        signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

        /**
         * Advice: are the backing pages purgeable?
         */
        unsigned int madv:2;

        /**
         * Current tiling mode for the object.
         */
        unsigned int tiling_mode:2;
        /**
         * Whether the tiling parameters for the currently associated fence
         * register have changed. Note that for the purposes of tracking
         * tiling changes we also treat the unfenced register, the register
         * slot that the object occupies whilst it executes a fenced
         * command (such as BLT on gen2/3), as a "fence".
         */
        unsigned int fence_dirty:1;

        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
         */
        unsigned int map_and_fenceable:1;

        /**
         * Whether the current gtt mapping needs to be mappable (and isn't just
         * mappable by accident). Track pin and fault separate for a more
         * accurate mappable working set.
         */
        unsigned int fault_mappable:1;

        /*
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
         */
        unsigned long gt_ro:1;
        unsigned int cache_level:3;
        unsigned int cache_dirty:1;

        /* see the INTEL_FRONTBUFFER_* macros for the bit layout */
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

        unsigned int pin_display;

        /* backing pages; pages_pin_count keeps them from being reaped */
        struct sg_table *pages;
        int pages_pin_count;
        /* cached scatterlist cursor to speed up sequential page lookup */
        struct get_page {
                struct scatterlist *sg;
                int last;
        } get_page;

        /* prime dma-buf support */
        void *dma_buf_vmapping;
        int vmapping_count;

        /** Breadcrumb of last rendering to the buffer.
         * There can only be one writer, but we allow for multiple readers.
         * If there is a writer that necessarily implies that all other
         * read requests are complete - but we may only be lazily clearing
         * the read requests. A read request is naturally the most recent
         * request on a ring, so we may have two different write and read
         * requests on one ring where the write request is older than the
         * read request. This allows for the CPU to read from an active
         * buffer by only waiting for the write to complete.
         * */
        struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
        struct drm_i915_gem_request *last_write_req;
        /** Breadcrumb of last fenced GPU access to the buffer. */
        struct drm_i915_gem_request *last_fenced_req;

        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;

        /** References from framebuffers, locks out tiling changes. */
        unsigned long framebuffer_references;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        /* state for objects backed by user memory (userptr) */
        struct i915_gem_userptr {
                uintptr_t ptr;
                unsigned read_only :1;
                unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

                struct i915_mm_struct *mm;
                struct i915_mmu_object *mmu_object;
                struct work_struct *work;
        } userptr;

        /** for phys allocated objects */
        drm_dma_handle_t *phys_handle;
};
/* Convert a base drm_gem_object pointer to its containing i915 GEM object. */
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/*
 * Transfer frontbuffer tracking bits between two objects, e.g. across a
 * page flip.  NOTE(review): exact semantics live in the implementation;
 * confirm against the frontbuffer tracking code.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits);
    2295             : 
    2296             : /**
    2297             :  * Request queue structure.
    2298             :  *
    2299             :  * The request queue allows us to note sequence numbers that have been emitted
    2300             :  * and may be associated with active buffers to be retired.
    2301             :  *
    2302             :  * By keeping this list, we can avoid having to do questionable sequence
    2303             :  * number comparisons on buffer last_read|write_seqno. It also allows an
    2304             :  * emission time to be associated with the request for tracking how far ahead
    2305             :  * of the GPU the submission is.
    2306             :  *
    2307             :  * The requests are reference counted, so upon creation they should have an
    2308             :  * initial reference taken using kref_init
    2309             :  */
struct drm_i915_gem_request {
        /** Reference count; the request is freed via i915_gem_request_free()
         *  once this drops to zero (see kref_init note above). */
        struct kref ref;

        /** Device private and the engine (ring) this request was
         *  generated on */
        struct drm_i915_private *i915;
        struct intel_engine_cs *ring;

         /** GEM sequence number associated with the previous request,
          * when the HWS breadcrumb is equal to this the GPU is processing
          * this request.
          */
        u32 previous_seqno;

         /** GEM sequence number associated with this request,
          * when the HWS breadcrumb is equal or greater than this the GPU
          * has finished processing this request.
          */
        u32 seqno;

        /** Position in the ringbuffer of the start of the request */
        u32 head;

        /**
         * Position in the ringbuffer of the start of the postfix.
         * This is required to calculate the maximum available ringbuffer
         * space without overwriting the postfix.
         */
         u32 postfix;

        /** Position in the ringbuffer of the end of the whole request */
        u32 tail;

        /**
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
         * context, we must increment the context's refcount, to guarantee that
         * it persists while any request is linked to it. Requests themselves
         * are also refcounted, so the request will only be freed when the last
         * reference to it is dismissed, and the code in
         * i915_gem_request_free() will then decrement the refcount on the
         * context.
         */
        struct intel_context *ctx;
        struct intel_ringbuffer *ringbuf;

        /** Batch buffer related to this request if any (used for
            error state dump only) */
        struct drm_i915_gem_object *batch_obj;

        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /** global list entry for this request */
        struct list_head list;

        /** file (client) that submitted this request, if any */
        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;

        /** process identifier submitting this request */
        struct pid *pid;

        /**
         * The ELSP only accepts two elements at a time, so we queue
         * context/tail pairs on a given queue (ring->execlist_queue) until the
         * hardware is available. The queue serves a double purpose: we also use
         * it to keep track of the up to 2 contexts currently in the hardware
         * (usually one in execution and the other queued up by the GPU): We
         * only remove elements from the head of the queue when the hardware
         * informs us that an element has been completed.
         *
         * All accesses to the queue are mediated by a spinlock
         * (ring->execlist_lock).
         */

        /** Execlist link in the submission queue.*/
        struct list_head execlist_link;

        /** Execlists no. of times this request has been sent to the ELSP */
        int elsp_submitted;

};
    2392             : 
    2393             : int i915_gem_request_alloc(struct intel_engine_cs *ring,
    2394             :                            struct intel_context *ctx,
    2395             :                            struct drm_i915_gem_request **req_out);
    2396             : void i915_gem_request_cancel(struct drm_i915_gem_request *req);
    2397             : void i915_gem_request_free(struct kref *req_ref);
    2398             : int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
    2399             :                                    struct drm_file *file);
    2400             : 
    2401             : static inline uint32_t
    2402           0 : i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
    2403             : {
    2404           0 :         return req ? req->seqno : 0;
    2405             : }
    2406             : 
    2407             : static inline struct intel_engine_cs *
    2408           0 : i915_gem_request_get_ring(struct drm_i915_gem_request *req)
    2409             : {
    2410           0 :         return req ? req->ring : NULL;
    2411             : }
    2412             : 
    2413             : static inline struct drm_i915_gem_request *
    2414           0 : i915_gem_request_reference(struct drm_i915_gem_request *req)
    2415             : {
    2416           0 :         if (req)
    2417           0 :                 kref_get(&req->ref);
    2418           0 :         return req;
    2419             : }
    2420             : 
/*
 * Drop a reference on @req; the final put frees it via
 * i915_gem_request_free().  Unlike the other helpers here @req must be
 * non-NULL (it is dereferenced unconditionally), and the caller must hold
 * dev->struct_mutex, which the WARN_ON below asserts.
 */
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
        WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
        kref_put(&req->ref, i915_gem_request_free);
}
    2427             : 
/*
 * Drop a reference on @req without the caller holding struct_mutex.
 * NULL @req is a no-op.
 */
static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
        struct drm_device *dev;

        if (!req)
                return;

        /*
         * kref_put_mutex() takes struct_mutex only when this put releases
         * the last reference, so i915_gem_request_free() runs under the
         * lock; it returns nonzero in that case and we must unlock here.
         */
        dev = req->ring->dev;
        if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
                mutex_unlock(&dev->struct_mutex);
}
    2440             : 
/*
 * Replace the request pointed to by *pdst with @src, fixing up refcounts:
 * a reference is taken on @src (if any) and the old *pdst reference (if
 * any) is dropped.  Either pointer may be NULL.
 *
 * The new reference is taken BEFORE the old one is dropped so the
 * assignment is safe even when src == *pdst.  Since the drop goes through
 * i915_gem_request_unreference(), the caller must hold struct_mutex
 * whenever *pdst is non-NULL.
 */
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                           struct drm_i915_gem_request *src)
{
        if (src)
                i915_gem_request_reference(src);

        if (*pdst)
                i915_gem_request_unreference(*pdst);

        *pdst = src;
}
    2452             : 
    2453             : /*
    2454             :  * XXX: i915_gem_request_completed should be here but currently needs the
    2455             :  * definition of i915_seqno_passed() which is below. It will be moved in
    2456             :  * a later patch when the call to i915_seqno_passed() is obsoleted...
    2457             :  */
    2458             : 
    2459             : /*
    2460             :  * A command that requires special handling by the command parser.
    2461             :  */
    2462             : struct drm_i915_cmd_descriptor {
    2463             :         /*
    2464             :          * Flags describing how the command parser processes the command.
    2465             :          *
    2466             :          * CMD_DESC_FIXED: The command has a fixed length if this is set,
    2467             :          *                 a length mask if not set
    2468             :          * CMD_DESC_SKIP: The command is allowed but does not follow the
    2469             :          *                standard length encoding for the opcode range in
    2470             :          *                which it falls
    2471             :          * CMD_DESC_REJECT: The command is never allowed
    2472             :          * CMD_DESC_REGISTER: The command should be checked against the
    2473             :          *                    register whitelist for the appropriate ring
    2474             :          * CMD_DESC_MASTER: The command is allowed if the submitting process
    2475             :          *                  is the DRM master
    2476             :          */
    2477             :         u32 flags;
    2478             : #define CMD_DESC_FIXED    (1<<0)
    2479             : #define CMD_DESC_SKIP     (1<<1)
    2480             : #define CMD_DESC_REJECT   (1<<2)
    2481             : #define CMD_DESC_REGISTER (1<<3)
    2482             : #define CMD_DESC_BITMASK  (1<<4)
    2483             : #define CMD_DESC_MASTER   (1<<5)
    2484             : 
    2485             :         /*
    2486             :          * The command's unique identification bits and the bitmask to get them.
    2487             :          * This isn't strictly the opcode field as defined in the spec and may
    2488             :          * also include type, subtype, and/or subop fields.
    2489             :          */
    2490             :         struct {
    2491             :                 u32 value;
    2492             :                 u32 mask;
    2493             :         } cmd;
    2494             : 
    2495             :         /*
    2496             :          * The command's length. The command is either fixed length (i.e. does
    2497             :          * not include a length field) or has a length field mask. The flag
    2498             :          * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
    2499             :          * a length mask. All command entries in a command table must include
    2500             :          * length information.
    2501             :          */
    2502             :         union {
    2503             :                 u32 fixed;
    2504             :                 u32 mask;
    2505             :         } length;
    2506             : 
    2507             :         /*
    2508             :          * Describes where to find a register address in the command to check
    2509             :          * against the ring's register whitelist. Only valid if flags has the
    2510             :          * CMD_DESC_REGISTER bit set.
    2511             :          *
    2512             :          * A non-zero step value implies that the command may access multiple
    2513             :          * registers in sequence (e.g. LRI), in that case step gives the
    2514             :          * distance in dwords between individual offset fields.
    2515             :          */
    2516             :         struct {
    2517             :                 u32 offset;
    2518             :                 u32 mask;
    2519             :                 u32 step;
    2520             :         } reg;
    2521             : 
    2522             : #define MAX_CMD_DESC_BITMASKS 3
    2523             :         /*
    2524             :          * Describes command checks where a particular dword is masked and
    2525             :          * compared against an expected value. If the command does not match
    2526             :          * the expected value, the parser rejects it. Only valid if flags has
    2527             :          * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
    2528             :          * are valid.
    2529             :          *
    2530             :          * If the check specifies a non-zero condition_mask then the parser
    2531             :          * only performs the check when the bits specified by condition_mask
    2532             :          * are non-zero.
    2533             :          */
    2534             :         struct {
    2535             :                 u32 offset;
    2536             :                 u32 mask;
    2537             :                 u32 expected;
    2538             :                 u32 condition_offset;
    2539             :                 u32 condition_mask;
    2540             :         } bits[MAX_CMD_DESC_BITMASKS];
    2541             : };
    2542             : 
    2543             : /*
    2544             :  * A table of commands requiring special handling by the command parser.
    2545             :  *
    2546             :  * Each ring has an array of tables. Each table consists of an array of command
    2547             :  * descriptors, which must be sorted with command opcodes in ascending order.
    2548             :  */
    2549             : struct drm_i915_cmd_table {
    2550             :         const struct drm_i915_cmd_descriptor *table;
    2551             :         int count;
    2552             : };
    2553             : 
/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
/*
 * Accept either a struct drm_i915_private * or a struct drm_device * and
 * yield the drm_i915_private pointer.  The dispatch is done at compile time
 * via __builtin_types_compatible_p inside a GCC statement expression; any
 * other pointer type fails the build through BUILD_BUG().
 */
#define __I915__(p) ({ \
        struct drm_i915_private *__p; \
        if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
                __p = (struct drm_i915_private *)p; \
        else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
                __p = to_i915((struct drm_device *)p); \
        else \
                BUILD_BUG(); \
        __p; \
})
#define INTEL_INFO(p)   (&__I915__(p)->info)
#define INTEL_DEVID(p)  (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p)  (__I915__(p)->dev->pdev->revision)
    2568             : 
    2569             : #define REVID_FOREVER   (0xff)
    2570             : 
    2571             : /*
    2572             :  * Return true if revision is in range [since,until] inclusive.
    2573             :  *
    2574             :  * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
    2575             :  */
    2576             : #define IS_REVID(p, since, until) \
    2577             :         (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
    2578             : 
    2579             : #define IS_I830(dev)            (INTEL_DEVID(dev) == 0x3577)
    2580             : #define IS_845G(dev)            (INTEL_DEVID(dev) == 0x2562)
    2581             : #define IS_I85X(dev)            (INTEL_INFO(dev)->is_i85x)
    2582             : #define IS_I865G(dev)           (INTEL_DEVID(dev) == 0x2572)
    2583             : #define IS_I915G(dev)           (INTEL_INFO(dev)->is_i915g)
    2584             : #define IS_I915GM(dev)          (INTEL_DEVID(dev) == 0x2592)
    2585             : #define IS_I945G(dev)           (INTEL_DEVID(dev) == 0x2772)
    2586             : #define IS_I945GM(dev)          (INTEL_INFO(dev)->is_i945gm)
    2587             : #define IS_BROADWATER(dev)      (INTEL_INFO(dev)->is_broadwater)
    2588             : #define IS_CRESTLINE(dev)       (INTEL_INFO(dev)->is_crestline)
    2589             : #define IS_GM45(dev)            (INTEL_DEVID(dev) == 0x2A42)
    2590             : #define IS_G4X(dev)             (INTEL_INFO(dev)->is_g4x)
    2591             : #define IS_PINEVIEW_G(dev)      (INTEL_DEVID(dev) == 0xa001)
    2592             : #define IS_PINEVIEW_M(dev)      (INTEL_DEVID(dev) == 0xa011)
    2593             : #define IS_PINEVIEW(dev)        (INTEL_INFO(dev)->is_pineview)
    2594             : #define IS_G33(dev)             (INTEL_INFO(dev)->is_g33)
    2595             : #define IS_IRONLAKE_M(dev)      (INTEL_DEVID(dev) == 0x0046)
    2596             : #define IS_IVYBRIDGE(dev)       (INTEL_INFO(dev)->is_ivybridge)
    2597             : #define IS_IVB_GT1(dev)         (INTEL_DEVID(dev) == 0x0156 || \
    2598             :                                  INTEL_DEVID(dev) == 0x0152 || \
    2599             :                                  INTEL_DEVID(dev) == 0x015a)
    2600             : #define IS_VALLEYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview)
    2601             : #define IS_CHERRYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
    2602             : #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
    2603             : #define IS_BROADWELL(dev)       (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
    2604             : #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
    2605             : #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
    2606             : #define IS_KABYLAKE(dev)        (INTEL_INFO(dev)->is_kabylake)
    2607             : #define IS_MOBILE(dev)          (INTEL_INFO(dev)->is_mobile)
    2608             : #define IS_HSW_EARLY_SDV(dev)   (IS_HASWELL(dev) && \
    2609             :                                  (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
    2610             : #define IS_BDW_ULT(dev)         (IS_BROADWELL(dev) && \
    2611             :                                  ((INTEL_DEVID(dev) & 0xf) == 0x6 ||        \
    2612             :                                  (INTEL_DEVID(dev) & 0xf) == 0xb || \
    2613             :                                  (INTEL_DEVID(dev) & 0xf) == 0xe))
    2614             : /* ULX machines are also considered ULT. */
    2615             : #define IS_BDW_ULX(dev)         (IS_BROADWELL(dev) && \
    2616             :                                  (INTEL_DEVID(dev) & 0xf) == 0xe)
    2617             : #define IS_BDW_GT3(dev)         (IS_BROADWELL(dev) && \
    2618             :                                  (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
    2619             : #define IS_HSW_ULT(dev)         (IS_HASWELL(dev) && \
    2620             :                                  (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
    2621             : #define IS_HSW_GT3(dev)         (IS_HASWELL(dev) && \
    2622             :                                  (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
    2623             : /* ULX machines are also considered ULT. */
    2624             : #define IS_HSW_ULX(dev)         (INTEL_DEVID(dev) == 0x0A0E || \
    2625             :                                  INTEL_DEVID(dev) == 0x0A1E)
    2626             : #define IS_SKL_ULT(dev)         (INTEL_DEVID(dev) == 0x1906 || \
    2627             :                                  INTEL_DEVID(dev) == 0x1913 || \
    2628             :                                  INTEL_DEVID(dev) == 0x1916 || \
    2629             :                                  INTEL_DEVID(dev) == 0x1921 || \
    2630             :                                  INTEL_DEVID(dev) == 0x1926)
    2631             : #define IS_SKL_ULX(dev)         (INTEL_DEVID(dev) == 0x190E || \
    2632             :                                  INTEL_DEVID(dev) == 0x1915 || \
    2633             :                                  INTEL_DEVID(dev) == 0x191E)
    2634             : #define IS_KBL_ULT(dev)         (INTEL_DEVID(dev) == 0x5906 || \
    2635             :                                  INTEL_DEVID(dev) == 0x5913 || \
    2636             :                                  INTEL_DEVID(dev) == 0x5916 || \
    2637             :                                  INTEL_DEVID(dev) == 0x5921 || \
    2638             :                                  INTEL_DEVID(dev) == 0x5926)
    2639             : #define IS_KBL_ULX(dev)         (INTEL_DEVID(dev) == 0x590E || \
    2640             :                                  INTEL_DEVID(dev) == 0x5915 || \
    2641             :                                  INTEL_DEVID(dev) == 0x591E)
    2642             : #define IS_SKL_GT3(dev)         (IS_SKYLAKE(dev) && \
    2643             :                                  (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
    2644             : #define IS_SKL_GT4(dev)         (IS_SKYLAKE(dev) && \
    2645             :                                  (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
    2646             : 
    2647             : #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
    2648             : 
    2649             : #define SKL_REVID_A0            (0x0)
    2650             : #define SKL_REVID_B0            (0x1)
    2651             : #define SKL_REVID_C0            (0x2)
    2652             : #define SKL_REVID_D0            (0x3)
    2653             : #define SKL_REVID_E0            (0x4)
    2654             : #define SKL_REVID_F0            (0x5)
    2655             : 
    2656             : #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
    2657             : 
    2658             : #define BXT_REVID_A0            (0x0)
    2659             : #define BXT_REVID_A1            (0x1)
    2660             : #define BXT_REVID_B0            (0x3)
    2661             : #define BXT_REVID_C0            (0x9)
    2662             : 
    2663             : #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
    2664             : 
    2665             : #define KBL_REVID_A0            (0x0)
    2666             : #define KBL_REVID_B0            (0x1)
    2667             : #define KBL_REVID_C0            (0x2)
    2668             : #define KBL_REVID_D0            (0x3)
    2669             : #define KBL_REVID_E0            (0x4)
    2670             : 
    2671             : #define IS_KBL_REVID(p, since, until) (IS_KABYLAKE(p) && IS_REVID(p, since, until))
    2672             : 
    2673             : /*
    2674             :  * The genX designation typically refers to the render engine, so render
    2675             :  * capability related checks should use IS_GEN, while display and other checks
    2676             :  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
    2677             :  * chips, etc.).
    2678             :  */
    2679             : #define IS_GEN2(dev)    (INTEL_INFO(dev)->gen == 2)
    2680             : #define IS_GEN3(dev)    (INTEL_INFO(dev)->gen == 3)
    2681             : #define IS_GEN4(dev)    (INTEL_INFO(dev)->gen == 4)
    2682             : #define IS_GEN5(dev)    (INTEL_INFO(dev)->gen == 5)
    2683             : #define IS_GEN6(dev)    (INTEL_INFO(dev)->gen == 6)
    2684             : #define IS_GEN7(dev)    (INTEL_INFO(dev)->gen == 7)
    2685             : #define IS_GEN8(dev)    (INTEL_INFO(dev)->gen == 8)
    2686             : #define IS_GEN9(dev)    (INTEL_INFO(dev)->gen == 9)
    2687             : 
    2688             : #define RENDER_RING             (1<<RCS)
    2689             : #define BSD_RING                (1<<VCS)
    2690             : #define BLT_RING                (1<<BCS)
    2691             : #define VEBOX_RING              (1<<VECS)
    2692             : #define BSD2_RING               (1<<VCS2)
    2693             : #define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
    2694             : #define HAS_BSD2(dev)           (INTEL_INFO(dev)->ring_mask & BSD2_RING)
    2695             : #define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
    2696             : #define HAS_VEBOX(dev)          (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
    2697             : #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
    2698             : #define HAS_WT(dev)             ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
    2699             :                                  __I915__(dev)->ellc_size)
    2700             : #define I915_NEED_GFX_HWS(dev)  (INTEL_INFO(dev)->need_gfx_hws)
    2701             : 
    2702             : #define HAS_HW_CONTEXTS(dev)    (INTEL_INFO(dev)->gen >= 6)
    2703             : #define HAS_LOGICAL_RING_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 8)
    2704             : #define USES_PPGTT(dev)         (i915.enable_ppgtt)
    2705             : #define USES_FULL_PPGTT(dev)    (i915.enable_ppgtt >= 2)
    2706             : #define USES_FULL_48BIT_PPGTT(dev)      (i915.enable_ppgtt == 3)
    2707             : 
    2708             : #define HAS_OVERLAY(dev)                (INTEL_INFO(dev)->has_overlay)
    2709             : #define OVERLAY_NEEDS_PHYSICAL(dev)     (INTEL_INFO(dev)->overlay_needs_physical)
    2710             : 
    2711             : /* Early gen2 have a totally busted CS tlb and require pinned batches. */
    2712             : #define HAS_BROKEN_CS_TLB(dev)          (IS_I830(dev) || IS_845G(dev))
    2713             : /*
    2714             :  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
    2715             :  * even when in MSI mode. This results in spurious interrupt warnings if the
    2716             :  * legacy irq no. is shared with another device. The kernel then disables that
    2717             :  * interrupt source and so prevents the other device from working properly.
    2718             :  */
    2719             : #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
    2720             : #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
    2721             : 
    2722             : /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
    2723             :  * rows, which changed the alignment requirements and fence programming.
    2724             :  */
    2725             : #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
    2726             :                                                       IS_I915GM(dev)))
    2727             : #define SUPPORTS_TV(dev)                (INTEL_INFO(dev)->supports_tv)
    2728             : #define I915_HAS_HOTPLUG(dev)            (INTEL_INFO(dev)->has_hotplug)
    2729             : 
    2730             : #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
    2731             : #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
    2732             : #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
    2733             : 
    2734             : #define HAS_IPS(dev)            (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
    2735             : 
    2736             : #define HAS_DP_MST(dev)         (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
    2737             :                                  INTEL_INFO(dev)->gen >= 9)
    2738             : 
    2739             : #define HAS_DDI(dev)            (INTEL_INFO(dev)->has_ddi)
    2740             : #define HAS_FPGA_DBG_UNCLAIMED(dev)     (INTEL_INFO(dev)->has_fpga_dbg)
    2741             : #define HAS_PSR(dev)            (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
    2742             :                                  IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
    2743             :                                  IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
    2744             : #define HAS_RUNTIME_PM(dev)     (IS_GEN6(dev) || IS_HASWELL(dev) || \
    2745             :                                  IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
    2746             :                                  IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
    2747             : #define HAS_RC6(dev)            (INTEL_INFO(dev)->gen >= 6)
    2748             : #define HAS_RC6p(dev)           (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
    2749             : 
    2750             : #define HAS_CSR(dev)    (IS_GEN9(dev))
    2751             : 
    2752             : #define HAS_GUC_UCODE(dev)      (IS_GEN9(dev) && !IS_KABYLAKE(dev))
    2753             : #define HAS_GUC_SCHED(dev)      (IS_GEN9(dev) && !IS_KABYLAKE(dev))
    2754             : 
    2755             : #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
    2756             :                                     INTEL_INFO(dev)->gen >= 8)
    2757             : 
    2758             : #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
    2759             :                                  !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
    2760             : 
    2761             : #define INTEL_PCH_DEVICE_ID_MASK                0xff00
    2762             : #define INTEL_PCH_IBX_DEVICE_ID_TYPE            0x3b00
    2763             : #define INTEL_PCH_CPT_DEVICE_ID_TYPE            0x1c00
    2764             : #define INTEL_PCH_PPT_DEVICE_ID_TYPE            0x1e00
    2765             : #define INTEL_PCH_LPT_DEVICE_ID_TYPE            0x8c00
    2766             : #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE         0x9c00
    2767             : #define INTEL_PCH_SPT_DEVICE_ID_TYPE            0xA100
    2768             : #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE         0x9D00
    2769             : #define INTEL_PCH_KBP_DEVICE_ID_TYPE            0xA200
    2770             : #define INTEL_PCH_P2X_DEVICE_ID_TYPE            0x7100
    2771             : #define INTEL_PCH_QEMU_DEVICE_ID_TYPE           0x2900 /* qemu q35 has 2918 */
    2772             : 
    2773             : #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
    2774             : #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
    2775             : #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
    2776             : #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
    2777             : #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
    2778             : #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
    2779             : #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
    2780             : #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
    2781             : #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
    2782             : 
    2783             : #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
    2784             : 
    2785             : /* DPF == dynamic parity feature */
    2786             : #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
    2787             : #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
    2788             : 
    2789             : #define GT_FREQUENCY_MULTIPLIER 50
    2790             : #define GEN9_FREQ_SCALER 3
    2791             : 
    2792             : #include "i915_trace.h"
    2793             : 
    2794             : extern const struct drm_ioctl_desc i915_ioctls[];
    2795             : extern int i915_max_ioctl;
    2796             : 
    2797             : #ifdef __linux__
    2798             : extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
    2799             : extern int i915_resume_switcheroo(struct drm_device *dev);
    2800             : #endif
    2801             : 
    2802             : /* i915_params.c */
/*
 * Driver/module parameters; definitions and defaults live in
 * i915_params.c.  NOTE(review): most "int" tristate fields appear to
 * use -1 for "per-chipset default" — confirm against i915_params.c.
 */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	int semaphores;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_execlists;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	bool load_detect_test;
	bool reset;
	bool disable_display;
	bool disable_vtd_wa;
	bool enable_guc_submission;
	int guc_log_level;
	int use_mmio_flip;
	int mmio_debug;
	bool verbose_state_checks;
	bool nuclear_pageflip;
	int edp_vswing;
};
    2836             : extern struct i915_params i915 __read_mostly;
    2837             : 
    2838             :                                 /* i915_dma.c */
    2839             : extern int i915_driver_load(struct drm_device *, unsigned long flags);
    2840             : extern int i915_driver_unload(struct drm_device *);
    2841             : extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
    2842             : extern void i915_driver_lastclose(struct drm_device * dev);
    2843             : extern void i915_driver_preclose(struct drm_device *dev,
    2844             :                                  struct drm_file *file);
    2845             : extern void i915_driver_postclose(struct drm_device *dev,
    2846             :                                   struct drm_file *file);
    2847             : #ifdef CONFIG_COMPAT
    2848             : extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
    2849             :                               unsigned long arg);
    2850             : #endif
    2851             : extern int intel_gpu_reset(struct drm_device *dev);
    2852             : extern bool intel_has_gpu_reset(struct drm_device *dev);
    2853             : extern int i915_reset(struct drm_device *dev);
    2854             : extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
    2855             : extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
    2856             : extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
    2857             : extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
    2858             : int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
    2859             : void i915_firmware_load_error_print(const char *fw_path, int err);
    2860             : 
    2861             : /* intel_hotplug.c */
    2862             : void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
    2863             : void intel_hpd_init(struct drm_i915_private *dev_priv);
    2864             : void intel_hpd_init_work(struct drm_i915_private *dev_priv);
    2865             : void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
    2866             : bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
    2867             : 
    2868             : /* i915_irq.c */
    2869             : void i915_queue_hangcheck(struct drm_device *dev);
    2870             : __printf(3, 4)
    2871             : void i915_handle_error(struct drm_device *dev, bool wedged,
    2872             :                        const char *fmt, ...);
    2873             : 
    2874             : extern void intel_irq_init(struct drm_i915_private *dev_priv);
    2875             : int intel_irq_install(struct drm_i915_private *dev_priv);
    2876             : void intel_irq_uninstall(struct drm_i915_private *dev_priv);
    2877             : 
    2878             : extern void intel_uncore_sanitize(struct drm_device *dev);
    2879             : extern void intel_uncore_early_sanitize(struct drm_device *dev,
    2880             :                                         bool restore_forcewake);
    2881             : extern void intel_uncore_init(struct drm_device *dev);
    2882             : extern void intel_uncore_check_errors(struct drm_device *dev);
    2883             : extern void intel_uncore_fini(struct drm_device *dev);
    2884             : extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
    2885             : const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
    2886             : void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
    2887             :                                 enum forcewake_domains domains);
    2888             : void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
    2889             :                                 enum forcewake_domains domains);
    2890             : /* Like above but the caller must manage the uncore.lock itself.
    2891             :  * Must be used with I915_READ_FW and friends.
    2892             :  */
    2893             : void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
    2894             :                                         enum forcewake_domains domains);
    2895             : void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
    2896             :                                         enum forcewake_domains domains);
    2897             : void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
/*
 * Report whether the driver is running on top of a virtual GPU.
 * On Linux this reads the vgpu.active flag; the OpenBSD port has no
 * vGPU support, so it is unconditionally false there.
 */
static inline bool intel_vgpu_active(struct drm_device *dev)
{
#ifdef __linux__
	return to_i915(dev)->vgpu.active;
#else
	/* No vGPU support in the OpenBSD port. */
	return false;
#endif
}
    2906             : 
    2907             : void
    2908             : i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    2909             :                      u32 status_mask);
    2910             : 
    2911             : void
    2912             : i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
    2913             :                       u32 status_mask);
    2914             : 
    2915             : void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
    2916             : void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
    2917             : void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
    2918             :                                    uint32_t mask,
    2919             :                                    uint32_t bits);
    2920             : void
    2921             : ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
    2922             : void
    2923             : ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
    2924             : void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
    2925             :                                   uint32_t interrupt_mask,
    2926             :                                   uint32_t enabled_irq_mask);
    2927             : #define ibx_enable_display_interrupt(dev_priv, bits) \
    2928             :         ibx_display_interrupt_update((dev_priv), (bits), (bits))
    2929             : #define ibx_disable_display_interrupt(dev_priv, bits) \
    2930             :         ibx_display_interrupt_update((dev_priv), (bits), 0)
    2931             : 
    2932             : /* i915_gem.c */
    2933             : int i915_gem_create_ioctl(struct drm_device *dev, void *data,
    2934             :                           struct drm_file *file_priv);
    2935             : int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    2936             :                          struct drm_file *file_priv);
    2937             : int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    2938             :                           struct drm_file *file_priv);
    2939             : int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
    2940             :                         struct drm_file *file_priv);
    2941             : int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
    2942             :                         struct drm_file *file_priv);
    2943             : int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
    2944             :                               struct drm_file *file_priv);
    2945             : int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
    2946             :                              struct drm_file *file_priv);
    2947             : void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
    2948             :                                         struct drm_i915_gem_request *req);
    2949             : void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
    2950             : int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
    2951             :                                    struct drm_i915_gem_execbuffer2 *args,
    2952             :                                    struct list_head *vmas);
    2953             : int i915_gem_execbuffer(struct drm_device *dev, void *data,
    2954             :                         struct drm_file *file_priv);
    2955             : int i915_gem_execbuffer2(struct drm_device *dev, void *data,
    2956             :                          struct drm_file *file_priv);
    2957             : int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
    2958             :                         struct drm_file *file_priv);
    2959             : int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
    2960             :                                struct drm_file *file);
    2961             : int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
    2962             :                                struct drm_file *file);
    2963             : int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
    2964             :                             struct drm_file *file_priv);
    2965             : int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
    2966             :                            struct drm_file *file_priv);
    2967             : int i915_gem_set_tiling(struct drm_device *dev, void *data,
    2968             :                         struct drm_file *file_priv);
    2969             : int i915_gem_get_tiling(struct drm_device *dev, void *data,
    2970             :                         struct drm_file *file_priv);
    2971             : int i915_gem_init_userptr(struct drm_device *dev);
    2972             : int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
    2973             :                            struct drm_file *file);
    2974             : int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    2975             :                                 struct drm_file *file_priv);
    2976             : int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
    2977             :                         struct drm_file *file_priv);
    2978             : void i915_gem_load(struct drm_device *dev);
    2979             : void *i915_gem_object_alloc(struct drm_device *dev);
    2980             : void i915_gem_object_free(struct drm_i915_gem_object *obj);
    2981             : void i915_gem_object_init(struct drm_i915_gem_object *obj,
    2982             :                          const struct drm_i915_gem_object_ops *ops);
    2983             : struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
    2984             :                                                   size_t size);
    2985             : struct drm_i915_gem_object *i915_gem_object_create_from_data(
    2986             :                 struct drm_device *dev, const void *data, size_t size);
    2987             : void i915_gem_free_object(struct drm_gem_object *obj);
    2988             : void i915_gem_vma_destroy(struct i915_vma *vma);
    2989             : 
    2990             : /* Flags used by pin/bind&friends. */
    2991             : #define PIN_MAPPABLE    (1<<0)
    2992             : #define PIN_NONBLOCK    (1<<1)
    2993             : #define PIN_GLOBAL      (1<<2)
    2994             : #define PIN_OFFSET_BIAS (1<<3)
    2995             : #define PIN_USER        (1<<4)
    2996             : #define PIN_UPDATE      (1<<5)
    2997             : #define PIN_ZONE_4G     (1<<6)
    2998             : #define PIN_HIGH        (1<<7)
    2999             : #define PIN_OFFSET_MASK (~4095)
    3000             : int __must_check
    3001             : i915_gem_object_pin(struct drm_i915_gem_object *obj,
    3002             :                     struct i915_address_space *vm,
    3003             :                     uint32_t alignment,
    3004             :                     uint64_t flags);
    3005             : int __must_check
    3006             : i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
    3007             :                          const struct i915_ggtt_view *view,
    3008             :                          uint32_t alignment,
    3009             :                          uint64_t flags);
    3010             : 
    3011             : int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
    3012             :                   u32 flags);
    3013             : void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
    3014             : int __must_check i915_vma_unbind(struct i915_vma *vma);
    3015             : /*
    3016             :  * BEWARE: Do not use the function below unless you can _absolutely_
    3017             :  * _guarantee_ VMA in question is _not in use_ anywhere.
    3018             :  */
    3019             : int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
    3020             : int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
    3021             : void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
    3022             : void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
    3023             : 
    3024             : int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
    3025             :                                     int *needs_clflush);
    3026             : 
    3027             : int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
    3028             : 
    3029           0 : static inline int __sg_page_count(struct scatterlist *sg)
    3030             : {
    3031           0 :         return sg->length >> PAGE_SHIFT;
    3032             : }
    3033             : 
#ifdef __linux__
/*
 * Look up the nth backing page of a GEM object.
 *
 * Walks the object's scatterlist, caching the last-visited entry in
 * obj->get_page so sequential lookups advance in O(1); a lookup behind
 * the cached position restarts from the head of the list.  Returns
 * NULL (with a WARN) when n is past the end of the object.
 */
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
		return NULL;

	/* Requested page is behind the cache: restart from the first sg. */
	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}

	/* Advance until the cached sg entry covers page n, hopping chains. */
	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
		if (unlikely(sg_is_chain(obj->get_page.sg)))
			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
	}

	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}
#else
/*
 * OpenBSD variant: same cached scatterlist walk, but the page is
 * recovered from the entry's dma_address via PHYS_TO_VM_PAGE.
 * NOTE(review): unlike the Linux version this loop does not handle
 * chained scatterlists — presumably the OpenBSD sg lists are always
 * flat; confirm against the allocator.
 */
static inline struct vm_page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
		return NULL;

	/* Requested page is behind the cache: restart from the first sg. */
	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}

	/* Advance until the cached sg entry covers page n. */
	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n)
		obj->get_page.last += __sg_page_count(obj->get_page.sg++);

	return PHYS_TO_VM_PAGE(obj->get_page.sg->dma_address +
			       (n - obj->get_page.last) * PAGE_SIZE);
}
#endif
    3073             : 
    3074           0 : static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
    3075             : {
    3076           0 :         BUG_ON(obj->pages == NULL);
    3077           0 :         obj->pages_pin_count++;
    3078           0 : }
    3079           0 : static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
    3080             : {
    3081           0 :         BUG_ON(obj->pages_pin_count == 0);
    3082           0 :         obj->pages_pin_count--;
    3083           0 : }
    3084             : 
    3085             : int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
    3086             : int i915_gem_object_sync(struct drm_i915_gem_object *obj,
    3087             :                          struct intel_engine_cs *to,
    3088             :                          struct drm_i915_gem_request **to_req);
    3089             : void i915_vma_move_to_active(struct i915_vma *vma,
    3090             :                              struct drm_i915_gem_request *req);
    3091             : int i915_gem_dumb_create(struct drm_file *file_priv,
    3092             :                          struct drm_device *dev,
    3093             :                          struct drm_mode_create_dumb *args);
    3094             : int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
    3095             :                       uint32_t handle, uint64_t *offset);
    3096             : /**
    3097             :  * Returns true if seq1 is later than seq2.
    3098             :  */
    3099             : static inline bool
    3100           0 : i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    3101             : {
    3102           0 :         return (int32_t)(seq1 - seq2) >= 0;
    3103             : }
    3104             : 
/*
 * Has the GPU begun executing this request?  True once the ring's
 * current seqno (read via the ring's get_seqno hook; lazy_coherency
 * allows a cheaper, possibly-stale read) has passed
 * req->previous_seqno — presumably the seqno of the request submitted
 * just before this one.
 */
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
					   bool lazy_coherency)
{
	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
	return i915_seqno_passed(seqno, req->previous_seqno);
}
    3111             : 
/*
 * Has the GPU finished executing this request?  True once the ring's
 * current seqno has passed the request's own seqno.  lazy_coherency is
 * forwarded to the ring's get_seqno hook, allowing a cheaper,
 * possibly-stale seqno read.
 */
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
	return i915_seqno_passed(seqno, req->seqno);
}
    3118             : 
    3119             : int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
    3120             : int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
    3121             : 
    3122             : struct drm_i915_gem_request *
    3123             : i915_gem_find_active_request(struct intel_engine_cs *ring);
    3124             : 
    3125             : bool i915_gem_retire_requests(struct drm_device *dev);
    3126             : void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
    3127             : int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
    3128             :                                       bool interruptible);
    3129             : 
/*
 * True while a GPU reset is pending, or once the GPU has been marked
 * terminally wedged — both states are flag bits in reset_counter.
 */
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
    3135             : 
    3136           0 : static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
    3137             : {
    3138           0 :         return atomic_read(&error->reset_counter) & I915_WEDGED;
    3139             : }
    3140             : 
    3141           0 : static inline u32 i915_reset_count(struct i915_gpu_error *error)
    3142             : {
    3143           0 :         return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
    3144             : }
    3145             : 
    3146           0 : static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
    3147             : {
    3148           0 :         return dev_priv->gpu_error.stop_rings == 0 ||
    3149           0 :                 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
    3150             : }
    3151             : 
    3152           0 : static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
    3153             : {
    3154           0 :         return dev_priv->gpu_error.stop_rings == 0 ||
    3155           0 :                 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
    3156             : }
    3157             : 
    3158             : void i915_gem_reset(struct drm_device *dev);
    3159             : bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
    3160             : int __must_check i915_gem_init(struct drm_device *dev);
    3161             : int i915_gem_init_rings(struct drm_device *dev);
    3162             : int __must_check i915_gem_init_hw(struct drm_device *dev);
    3163             : int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
    3164             : void i915_gem_init_swizzling(struct drm_device *dev);
    3165             : void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
    3166             : int __must_check i915_gpu_idle(struct drm_device *dev);
    3167             : int __must_check i915_gem_suspend(struct drm_device *dev);
    3168             : void __i915_add_request(struct drm_i915_gem_request *req,
    3169             :                         struct drm_i915_gem_object *batch_obj,
    3170             :                         bool flush_caches);
    3171             : #define i915_add_request(req) \
    3172             :         __i915_add_request(req, NULL, true)
    3173             : #define i915_add_request_no_flush(req) \
    3174             :         __i915_add_request(req, NULL, false)
    3175             : int __i915_wait_request(struct drm_i915_gem_request *req,
    3176             :                         unsigned reset_counter,
    3177             :                         bool interruptible,
    3178             :                         s64 *timeout,
    3179             :                         struct intel_rps_client *rps);
    3180             : int __must_check i915_wait_request(struct drm_i915_gem_request *req);
    3181             : #ifdef __linux__
    3182             : int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
    3183             : #else
    3184             : int i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
    3185             :                    off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages,
    3186             :                    int centeridx, vm_prot_t access_type, int flags);
    3187             : #endif
    3188             : int __must_check
    3189             : i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
    3190             :                                bool readonly);
    3191             : int __must_check
    3192             : i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
    3193             :                                   bool write);
    3194             : int __must_check
    3195             : i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
    3196             : int __must_check
    3197             : i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
    3198             :                                      u32 alignment,
    3199             :                                      struct intel_engine_cs *pipelined,
    3200             :                                      struct drm_i915_gem_request **pipelined_request,
    3201             :                                      const struct i915_ggtt_view *view);
    3202             : void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
    3203             :                                               const struct i915_ggtt_view *view);
    3204             : int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
    3205             :                                 int align);
    3206             : int i915_gem_open(struct drm_device *dev, struct drm_file *file);
    3207             : void i915_gem_release(struct drm_device *dev, struct drm_file *file);
    3208             : 
    3209             : uint32_t
    3210             : i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
    3211             : uint32_t
    3212             : i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
    3213             :                             int tiling_mode, bool fenced);
    3214             : 
    3215             : int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
    3216             :                                     enum i915_cache_level cache_level);
    3217             : 
    3218             : struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
    3219             :                                 struct dma_buf *dma_buf);
    3220             : 
    3221             : struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
    3222             :                                 struct drm_gem_object *gem_obj, int flags);
    3223             : 
    3224             : u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
    3225             :                                   const struct i915_ggtt_view *view);
    3226             : u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
    3227             :                         struct i915_address_space *vm);
    3228             : static inline u64
    3229           0 : i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
    3230             : {
    3231           0 :         return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
    3232             : }
    3233             : 
    3234             : bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
    3235             : bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
    3236             :                                   const struct i915_ggtt_view *view);
    3237             : bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
    3238             :                         struct i915_address_space *vm);
    3239             : 
    3240             : unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
    3241             :                                 struct i915_address_space *vm);
    3242             : struct i915_vma *
    3243             : i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
    3244             :                     struct i915_address_space *vm);
    3245             : struct i915_vma *
    3246             : i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
    3247             :                           const struct i915_ggtt_view *view);
    3248             : 
    3249             : struct i915_vma *
    3250             : i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
    3251             :                                   struct i915_address_space *vm);
    3252             : struct i915_vma *
    3253             : i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
    3254             :                                        const struct i915_ggtt_view *view);
    3255             : 
    3256             : static inline struct i915_vma *
    3257           0 : i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
    3258             : {
    3259           0 :         return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
    3260             : }
    3261             : bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
    3262             : 
    3263             : /* Some GGTT VM helpers */
    3264             : #define i915_obj_to_ggtt(obj) \
    3265             :         (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
    3266           0 : static inline bool i915_is_ggtt(struct i915_address_space *vm)
    3267             : {
    3268             :         struct i915_address_space *ggtt =
    3269           0 :                 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
    3270           0 :         return vm == ggtt;
    3271             : }
    3272             : 
/*
 * Downcast an address space to its containing per-process GTT.  Only
 * valid for ppgtt address spaces; the WARN_ON flags callers that pass
 * the global GTT by mistake (the cast is still performed regardless).
 */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));

	return container_of(vm, struct i915_hw_ppgtt, base);
}
    3280             : 
    3281             : 
    3282           0 : static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
    3283             : {
    3284           0 :         return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
    3285             : }
    3286             : 
    3287             : static inline unsigned long
    3288           0 : i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
    3289             : {
    3290           0 :         return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
    3291             : }
    3292             : 
    3293             : static inline int __must_check
    3294           0 : i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
    3295             :                       uint32_t alignment,
    3296             :                       unsigned flags)
    3297             : {
    3298           0 :         return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
    3299           0 :                                    alignment, flags | PIN_GLOBAL);
    3300             : }
    3301             : 
    3302             : static inline int
    3303           0 : i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
    3304             : {
    3305           0 :         return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
    3306             : }
    3307             : 
    3308             : void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
    3309             :                                      const struct i915_ggtt_view *view);
    3310             : static inline void
    3311           0 : i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
    3312             : {
    3313           0 :         i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
    3314           0 : }
    3315             : 
    3316             : /* i915_gem_fence.c */
    3317             : int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
    3318             : int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
    3319             : 
    3320             : bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
    3321             : void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
    3322             : 
    3323             : void i915_gem_restore_fences(struct drm_device *dev);
    3324             : 
    3325             : void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
    3326             : void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
    3327             : void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
    3328             : 
    3329             : /* i915_gem_context.c */
    3330             : int __must_check i915_gem_context_init(struct drm_device *dev);
    3331             : void i915_gem_context_fini(struct drm_device *dev);
    3332             : void i915_gem_context_reset(struct drm_device *dev);
    3333             : int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
    3334             : int i915_gem_context_enable(struct drm_i915_gem_request *req);
    3335             : void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
    3336             : int i915_switch_context(struct drm_i915_gem_request *req);
    3337             : struct intel_context *
    3338             : i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
    3339             : void i915_gem_context_free(struct kref *ctx_ref);
    3340             : struct drm_i915_gem_object *
    3341             : i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
    3342           0 : static inline void i915_gem_context_reference(struct intel_context *ctx)
    3343             : {
    3344           0 :         kref_get(&ctx->ref);
    3345           0 : }
    3346             : 
    3347           0 : static inline void i915_gem_context_unreference(struct intel_context *ctx)
    3348             : {
    3349           0 :         kref_put(&ctx->ref, i915_gem_context_free);
    3350           0 : }
    3351             : 
    3352           0 : static inline bool i915_gem_context_is_default(const struct intel_context *c)
    3353             : {
    3354           0 :         return c->user_handle == DEFAULT_CONTEXT_HANDLE;
    3355             : }
    3356             : 
    3357             : int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
    3358             :                                   struct drm_file *file);
    3359             : int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
    3360             :                                    struct drm_file *file);
    3361             : int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
    3362             :                                     struct drm_file *file_priv);
    3363             : int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
    3364             :                                     struct drm_file *file_priv);
    3365             : 
    3366             : /* i915_gem_evict.c */
    3367             : int __must_check i915_gem_evict_something(struct drm_device *dev,
    3368             :                                           struct i915_address_space *vm,
    3369             :                                           int min_size,
    3370             :                                           unsigned alignment,
    3371             :                                           unsigned cache_level,
    3372             :                                           unsigned long start,
    3373             :                                           unsigned long end,
    3374             :                                           unsigned flags);
    3375             : int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
    3376             : 
    3377             : /* belongs in i915_gem_gtt.h */
    3378           0 : static inline void i915_gem_chipset_flush(struct drm_device *dev)
    3379             : {
    3380           0 :         if (INTEL_INFO(dev)->gen < 6)
    3381           0 :                 intel_gtt_chipset_flush();
    3382           0 : }
    3383             : 
    3384             : /* i915_gem_stolen.c */
    3385             : int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
    3386             :                                 struct drm_mm_node *node, u64 size,
    3387             :                                 unsigned alignment);
    3388             : int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
    3389             :                                          struct drm_mm_node *node, u64 size,
    3390             :                                          unsigned alignment, u64 start,
    3391             :                                          u64 end);
    3392             : void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
    3393             :                                  struct drm_mm_node *node);
    3394             : int i915_gem_init_stolen(struct drm_device *dev);
    3395             : void i915_gem_cleanup_stolen(struct drm_device *dev);
    3396             : struct drm_i915_gem_object *
    3397             : i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
    3398             : struct drm_i915_gem_object *
    3399             : i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
    3400             :                                                u32 stolen_offset,
    3401             :                                                u32 gtt_offset,
    3402             :                                                u32 size);
    3403             : 
    3404             : /* i915_gem_shrinker.c */
    3405             : unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
    3406             :                               unsigned long target,
    3407             :                               unsigned flags);
    3408             : #define I915_SHRINK_PURGEABLE 0x1
    3409             : #define I915_SHRINK_UNBOUND 0x2
    3410             : #define I915_SHRINK_BOUND 0x4
    3411             : #define I915_SHRINK_ACTIVE 0x8
    3412             : unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
    3413             : void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
    3414             : 
    3415             : 
    3416             : /* i915_gem_tiling.c */
    3417           0 : static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
    3418             : {
    3419           0 :         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    3420             : 
    3421           0 :         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
    3422           0 :                 obj->tiling_mode != I915_TILING_NONE;
    3423             : }
    3424             : 
    3425             : /* i915_gem_debug.c */
    3426             : #if WATCH_LISTS
    3427             : int i915_verify_lists(struct drm_device *dev);
    3428             : #else
    3429             : #define i915_verify_lists(dev) 0
    3430             : #endif
    3431             : 
    3432             : /* i915_debugfs.c */
    3433             : int i915_debugfs_init(struct drm_minor *minor);
    3434             : void i915_debugfs_cleanup(struct drm_minor *minor);
    3435             : #ifdef CONFIG_DEBUG_FS
    3436             : int i915_debugfs_connector_add(struct drm_connector *connector);
    3437             : void intel_display_crc_init(struct drm_device *dev);
    3438             : #else
    3439           0 : static inline int i915_debugfs_connector_add(struct drm_connector *connector)
    3440           0 : { return 0; }
    3441           0 : static inline void intel_display_crc_init(struct drm_device *dev) {}
    3442             : #endif
    3443             : 
    3444             : /* i915_gpu_error.c */
    3445             : __printf(2, 3)
    3446             : void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
    3447             : int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
    3448             :                             const struct i915_error_state_file_priv *error);
    3449             : int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
    3450             :                               struct drm_i915_private *i915,
    3451             :                               size_t count, loff_t pos);
    3452             : static inline void i915_error_state_buf_release(
    3453             :         struct drm_i915_error_state_buf *eb)
    3454             : {
    3455             :         kfree(eb->buf);
    3456             : }
    3457             : void i915_capture_error_state(struct drm_device *dev, bool wedge,
    3458             :                               const char *error_msg);
    3459             : void i915_error_state_get(struct drm_device *dev,
    3460             :                           struct i915_error_state_file_priv *error_priv);
    3461             : void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
    3462             : void i915_destroy_error_state(struct drm_device *dev);
    3463             : 
    3464             : void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
    3465             : const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
    3466             : 
    3467             : /* i915_cmd_parser.c */
    3468             : int i915_cmd_parser_get_version(void);
    3469             : int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
    3470             : void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
    3471             : bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
    3472             : int i915_parse_cmds(struct intel_engine_cs *ring,
    3473             :                     struct drm_i915_gem_object *batch_obj,
    3474             :                     struct drm_i915_gem_object *shadow_batch_obj,
    3475             :                     u32 batch_start_offset,
    3476             :                     u32 batch_len,
    3477             :                     bool is_master);
    3478             : 
    3479             : /* i915_suspend.c */
    3480             : extern int i915_save_state(struct drm_device *dev);
    3481             : extern int i915_restore_state(struct drm_device *dev);
    3482             : 
    3483             : /* i915_sysfs.c */
    3484             : void i915_setup_sysfs(struct drm_device *dev_priv);
    3485             : void i915_teardown_sysfs(struct drm_device *dev_priv);
    3486             : 
    3487             : /* intel_i2c.c */
    3488             : extern int intel_setup_gmbus(struct drm_device *dev);
    3489             : extern void intel_teardown_gmbus(struct drm_device *dev);
    3490             : extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
    3491             :                                      unsigned int pin);
    3492             : 
    3493             : extern struct i2c_adapter *
    3494             : intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
    3495             : extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
    3496             : extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
    3497           0 : static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
    3498             : {
    3499           0 :         return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
    3500             : }
    3501             : extern void intel_i2c_reset(struct drm_device *dev);
    3502             : 
    3503             : /* intel_bios.c */
    3504             : bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
    3505             : 
    3506             : /* intel_opregion.c */
    3507             : #ifdef CONFIG_ACPI
    3508             : extern int intel_opregion_setup(struct drm_device *dev);
    3509             : extern void intel_opregion_init(struct drm_device *dev);
    3510             : extern void intel_opregion_fini(struct drm_device *dev);
    3511             : extern void intel_opregion_asle_intr(struct drm_device *dev);
    3512             : extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
    3513             :                                          bool enable);
    3514             : extern int intel_opregion_notify_adapter(struct drm_device *dev,
    3515             :                                          pci_power_t state);
    3516             : #else
    3517             : static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
    3518             : static inline void intel_opregion_init(struct drm_device *dev) { return; }
    3519             : static inline void intel_opregion_fini(struct drm_device *dev) { return; }
    3520             : static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
    3521             : static inline int
    3522             : intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
    3523             : {
    3524             :         return 0;
    3525             : }
    3526             : static inline int
    3527             : intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
    3528             : {
    3529             :         return 0;
    3530             : }
    3531             : #endif
    3532             : 
    3533             : /* intel_acpi.c */
    3534             : #ifdef CONFIG_ACPI
    3535             : extern void intel_register_dsm_handler(void);
    3536             : extern void intel_unregister_dsm_handler(void);
    3537             : #else
    3538             : static inline void intel_register_dsm_handler(void) { return; }
    3539             : static inline void intel_unregister_dsm_handler(void) { return; }
    3540             : #endif /* CONFIG_ACPI */
    3541             : 
    3542             : /* modesetting */
    3543             : extern void intel_modeset_init_hw(struct drm_device *dev);
    3544             : extern void intel_modeset_init(struct drm_device *dev);
    3545             : extern void intel_modeset_gem_init(struct drm_device *dev);
    3546             : extern void intel_modeset_cleanup(struct drm_device *dev);
    3547             : extern void intel_connector_unregister(struct intel_connector *);
    3548             : extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
    3549             : extern void intel_display_resume(struct drm_device *dev);
    3550             : extern void i915_redisable_vga(struct drm_device *dev);
    3551             : extern void i915_redisable_vga_power_on(struct drm_device *dev);
    3552             : extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
    3553             : extern void intel_init_pch_refclk(struct drm_device *dev);
    3554             : extern void intel_set_rps(struct drm_device *dev, u8 val);
    3555             : extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
    3556             :                                   bool enable);
    3557             : extern void intel_detect_pch(struct drm_device *dev);
    3558             : extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
    3559             : extern int intel_enable_rc6(const struct drm_device *dev);
    3560             : 
    3561             : extern bool i915_semaphore_is_enabled(struct drm_device *dev);
    3562             : int i915_reg_read_ioctl(struct drm_device *dev, void *data,
    3563             :                         struct drm_file *file);
    3564             : int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
    3565             :                                struct drm_file *file);
    3566             : 
    3567             : /* overlay */
    3568             : extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
    3569             : extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
    3570             :                                             struct intel_overlay_error_state *error);
    3571             : 
    3572             : extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
    3573             : extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
    3574             :                                             struct drm_device *dev,
    3575             :                                             struct intel_display_error_state *error);
    3576             : 
    3577             : int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
    3578             : int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
    3579             : 
    3580             : /* intel_sideband.c */
    3581             : u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
    3582             : void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
    3583             : u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
    3584             : u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
    3585             : void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3586             : u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
    3587             : void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3588             : u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
    3589             : void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3590             : u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
    3591             : void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3592             : u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
    3593             : void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3594             : u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
    3595             : void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
    3596             : u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
    3597             :                    enum intel_sbi_destination destination);
    3598             : void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
    3599             :                      enum intel_sbi_destination destination);
    3600             : u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
    3601             : void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
    3602             : 
    3603             : int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
    3604             : int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
    3605             : 
    3606             : #define I915_READ8(reg)         dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
    3607             : #define I915_WRITE8(reg, val)   dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
    3608             : 
    3609             : #define I915_READ16(reg)        dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
    3610             : #define I915_WRITE16(reg, val)  dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
    3611             : #define I915_READ16_NOTRACE(reg)        dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
    3612             : #define I915_WRITE16_NOTRACE(reg, val)  dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
    3613             : 
    3614             : #define I915_READ(reg)          dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
    3615             : #define I915_WRITE(reg, val)    dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
    3616             : #define I915_READ_NOTRACE(reg)          dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
    3617             : #define I915_WRITE_NOTRACE(reg, val)    dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
    3618             : 
    3619             : /* Be very careful with read/write 64-bit values. On 32-bit machines, they
    3620             :  * will be implemented using 2 32-bit writes in an arbitrary order with
    3621             :  * an arbitrary delay between them. This can cause the hardware to
    3622             :  * act upon the intermediate value, possibly leading to corruption and
    3623             :  * machine death. You have been warned.
    3624             :  */
    3625             : #define I915_WRITE64(reg, val)  dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
    3626             : #define I915_READ64(reg)        dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
    3627             : 
    3628             : #define I915_READ64_2x32(lower_reg, upper_reg) ({                       \
    3629             :         u32 upper, lower, old_upper, loop = 0;                          \
    3630             :         upper = I915_READ(upper_reg);                                   \
    3631             :         do {                                                            \
    3632             :                 old_upper = upper;                                      \
    3633             :                 lower = I915_READ(lower_reg);                           \
    3634             :                 upper = I915_READ(upper_reg);                           \
    3635             :         } while (upper != old_upper && loop++ < 2);                  \
    3636             :         (u64)upper << 32 | lower; })
    3637             : 
    3638             : #define POSTING_READ(reg)       (void)I915_READ_NOTRACE(reg)
    3639             : #define POSTING_READ16(reg)     (void)I915_READ16_NOTRACE(reg)
    3640             : 
    3641             : /* These are untraced mmio-accessors that are only valid to be used inside
    3642             :  * criticial sections inside IRQ handlers where forcewake is explicitly
    3643             :  * controlled.
    3644             :  * Think twice, and think again, before using these.
    3645             :  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
    3646             :  * intel_uncore_forcewake_irqunlock().
    3647             :  */
    3648             : #ifdef __linux__
    3649             : #define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
    3650             : #define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
    3651             : #else
    3652             : #define I915_READ_FW(reg__) bus_space_read_4(dev_priv->regs->bst, dev_priv->regs->bsh, (reg__))
    3653             : #define I915_WRITE_FW(reg__, val__) bus_space_write_4(dev_priv->regs->bst, dev_priv->regs->bsh, (reg__), (val__))
    3654             : #endif
    3655             : #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
    3656             : 
    3657             : /* "Broadcast RGB" property */
    3658             : #define INTEL_BROADCAST_RGB_AUTO 0
    3659             : #define INTEL_BROADCAST_RGB_FULL 1
    3660             : #define INTEL_BROADCAST_RGB_LIMITED 2
    3661             : 
    3662           0 : static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
    3663             : {
    3664           0 :         if (IS_VALLEYVIEW(dev))
    3665           0 :                 return VLV_VGACNTRL;
    3666           0 :         else if (INTEL_INFO(dev)->gen >= 5)
    3667           0 :                 return CPU_VGACNTRL;
    3668             :         else
    3669           0 :                 return VGACNTRL;
    3670           0 : }
    3671             : 
    3672           0 : static inline void __user *to_user_ptr(u64 address)
    3673             : {
    3674           0 :         return (void __user *)(uintptr_t)address;
    3675             : }
    3676             : 
    3677           0 : static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
    3678             : {
    3679           0 :         unsigned long j = msecs_to_jiffies(m);
    3680             : 
    3681           0 :         return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
    3682             : }
    3683             : 
    3684           0 : static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
    3685             : {
    3686           0 :         return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
    3687             : }
    3688             : 
    3689             : static inline unsigned long
    3690             : timespec_to_jiffies_timeout(const struct timespec *value)
    3691             : {
    3692             :         unsigned long j = timespec_to_jiffies(value);
    3693             : 
    3694             :         return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
    3695             : }
    3696             : 
    3697             : /*
    3698             :  * If you need to wait X milliseconds between events A and B, but event B
    3699             :  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
    3700             :  * when event A happened, then just before event B you call this function and
    3701             :  * pass the timestamp as the first argument, and X as the second argument.
    3702             :  */
    3703             : #ifdef __linux__
    3704             : static inline void
    3705             : wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
    3706             : {
    3707             :         unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
    3708             : 
    3709             :         /*
    3710             :          * Don't re-read the value of "jiffies" every time since it may change
    3711             :          * behind our back and break the math.
    3712             :          */
    3713             :         tmp_jiffies = jiffies;
    3714             :         target_jiffies = timestamp_jiffies +
    3715             :                          msecs_to_jiffies_timeout(to_wait_ms);
    3716             : 
    3717             :         if (time_after(target_jiffies, tmp_jiffies)) {
    3718             :                 remaining_jiffies = target_jiffies - tmp_jiffies;
    3719             :                 while (remaining_jiffies)
    3720             :                         remaining_jiffies =
    3721             :                             schedule_timeout_uninterruptible(remaining_jiffies);
    3722             :         }
    3723             : }
    3724             : #else
    3725             : static inline void
    3726           0 : wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
    3727             : {
    3728           0 :         unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
    3729             : 
    3730           0 :         if (cold) {
    3731           0 :                 delay(to_wait_ms * 1000);
    3732           0 :                 return;
    3733             :         }
    3734             : 
    3735             :         /*
    3736             :          * Don't re-read the value of "jiffies" every time since it may change
    3737             :          * behind our back and break the math.
    3738             :          */
    3739           0 :         tmp_jiffies = jiffies;
    3740           0 :         target_jiffies = timestamp_jiffies +
    3741           0 :                          msecs_to_jiffies_timeout(to_wait_ms);
    3742             : 
    3743           0 :         while (time_after(target_jiffies, tmp_jiffies)) {
    3744           0 :                 remaining_jiffies = target_jiffies - tmp_jiffies;
    3745           0 :                 tsleep(&tmp_jiffies, PWAIT, "wrmfj", remaining_jiffies);
    3746           0 :                 tmp_jiffies = jiffies;
    3747             :         }
    3748           0 : }
    3749             : #endif
    3750             : 
    3751             : static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
    3752             :                                       struct drm_i915_gem_request *req)
    3753             : {
    3754             :         if (ring->trace_irq_req == NULL && ring->irq_get(ring))
    3755             :                 i915_gem_request_assign(&ring->trace_irq_req, req);
    3756             : }
    3757             : 
    3758             : #endif

Generated by: LCOV version 1.13