LCOV - code coverage report
Current view: top level - dev/pci/drm/i915 - intel_ringbuffer.h (source / functions)
Test: 6.4
Date: 2018-10-19 03:25:38
Coverage:  Lines:     0 hit / 27 total  (0.0 %)
           Functions: 0 hit / 8 total   (0.0 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : #ifndef _INTEL_RINGBUFFER_H_
       2             : #define _INTEL_RINGBUFFER_H_
       3             : 
       4             : #ifdef __linux__
       5             : #include <linux/hashtable.h>
       6             : #endif
       7             : #include "i915_gem_batch_pool.h"
       8             : 
       9             : #define I915_CMD_HASH_ORDER 9
      10             : 
      11             : /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
      12             :  * but keeps the logic simple. Indeed, the whole purpose of this macro is just
       13             :  * to give some indication as to some of the magic values used in the various
      14             :  * workarounds!
      15             :  */
      16             : #define CACHELINE_BYTES 64
      17             : #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
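/*
 * Illustrative sketch, not part of the original header: one plausible use of
 * the macros above is rounding an emit length up to a whole cacheline, the
 * kind of "magic value" the workarounds rely on. The helper name is
 * hypothetical; roundup() is the usual kernel macro for this.
 */
static inline unsigned int
example_cacheline_align(unsigned int ndwords)
{
	/* CACHELINE_DWORDS == CACHELINE_BYTES / sizeof(uint32_t) == 16 */
	return roundup(ndwords, CACHELINE_DWORDS);
}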
      18             : 
      19             : /*
      20             :  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
      21             :  * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
      22             :  * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
      23             :  *
      24             :  * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
      25             :  * cacheline, the Head Pointer must not be greater than the Tail
      26             :  * Pointer."
      27             :  */
      28             : #define I915_RING_FREE_SPACE 64
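/*
 * Sketch only, assuming the semantics of the __intel_ring_space() prototype
 * declared later in this header (the real body lives in intel_ringbuffer.c):
 * by never reporting the final I915_RING_FREE_SPACE bytes as usable, the
 * tail can never advance onto the cacheline holding the head, which keeps
 * the BSpec rule quoted above satisfied.
 */
static inline int
example_ring_space(int head, int tail, int size)
{
	int space = head - tail;		/* bytes between tail and head */

	if (space <= 0)
		space += size;			/* wrap around the ring */
	return space - I915_RING_FREE_SPACE;	/* keep one cacheline in reserve */
}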
      29             : 
      30             : struct  intel_hw_status_page {
      31             :         u32             *page_addr;
      32             :         unsigned int    gfx_addr;
      33             :         struct          drm_i915_gem_object *obj;
      34             : };
      35             : 
      36             : #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
      37             : #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
      38             : 
      39             : #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
      40             : #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
      41             : 
      42             : #define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
      43             : #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
      44             : 
      45             : #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
      46             : #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
      47             : 
      48             : #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
      49             : #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
      50             : 
      51             : #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
      52             : #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
      53             : 
      54             : /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
       55             :  * do the writes, and those must use qword-aligned offsets, simply pretend it's 8 bytes.
      56             :  */
      57             : #define i915_semaphore_seqno_size sizeof(uint64_t)
      58             : #define GEN8_SIGNAL_OFFSET(__ring, to)                       \
      59             :         (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
      60             :         ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +        \
      61             :         (i915_semaphore_seqno_size * (to)))
      62             : 
      63             : #define GEN8_WAIT_OFFSET(__ring, from)                       \
      64             :         (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
      65             :         ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
      66             :         (i915_semaphore_seqno_size * (__ring)->id))
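/*
 * Worked example (illustration only): with I915_NUM_RINGS == 5 and a seqno
 * slot of 8 bytes, VCS (id 1) signalling BCS (id 2) writes at
 *   GEN8_SIGNAL_OFFSET(vcs, BCS) = base + (1 * 5 * 8) + (8 * 2) = base + 0x38,
 * and BCS waiting on VCS reads from
 *   GEN8_WAIT_OFFSET(bcs, VCS)   = base + (1 * 5 * 8) + (8 * 2) = base + 0x38,
 * i.e. both resolve to the same slot, matching the 0x38 entries in the
 * signal/wait tables documented in struct intel_engine_cs below.
 */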
      67             : 
      68             : #define GEN8_RING_SEMAPHORE_INIT do { \
      69             :         if (!dev_priv->semaphore_obj) { \
      70             :                 break; \
      71             :         } \
      72             :         ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
      73             :         ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
      74             :         ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
      75             :         ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
      76             :         ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
      77             :         ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
       78             :         } while (0)
      79             : 
      80             : enum intel_ring_hangcheck_action {
      81             :         HANGCHECK_IDLE = 0,
      82             :         HANGCHECK_WAIT,
      83             :         HANGCHECK_ACTIVE,
      84             :         HANGCHECK_ACTIVE_LOOP,
      85             :         HANGCHECK_KICK,
      86             :         HANGCHECK_HUNG,
      87             : };
      88             : 
      89             : #define HANGCHECK_SCORE_RING_HUNG 31
      90             : 
      91             : struct intel_ring_hangcheck {
      92             :         u64 acthd;
      93             :         u64 max_acthd;
      94             :         u32 seqno;
      95             :         int score;
      96             :         enum intel_ring_hangcheck_action action;
      97             :         int deadlock;
      98             : };
      99             : 
     100             : struct intel_ringbuffer {
     101             :         struct drm_i915_gem_object *obj;
     102             :         void __iomem *virtual_start;
     103             :         bus_space_handle_t bsh;
     104             : 
     105             :         struct intel_engine_cs *ring;
     106             : 
     107             :         u32 head;
     108             :         u32 tail;
     109             :         int space;
     110             :         int size;
     111             :         int effective_size;
     112             :         int reserved_size;
     113             :         int reserved_tail;
     114             :         bool reserved_in_use;
     115             : 
     116             :         /** We track the position of the requests in the ring buffer, and
     117             :          * when each is retired we increment last_retired_head as the GPU
     118             :          * must have finished processing the request and so we know we
     119             :          * can advance the ringbuffer up to that position.
     120             :          *
     121             :          * last_retired_head is set to -1 after the value is consumed so
     122             :          * we can detect new retirements.
     123             :          */
     124             :         u32 last_retired_head;
     125             : };
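/*
 * Hypothetical sketch (not from the driver): how a retirement path might
 * consume last_retired_head as described in the comment above.
 */
static inline void
example_consume_retired_head(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != (u32)-1) {
		/* the GPU is known to have finished up to this point */
		ringbuf->head = ringbuf->last_retired_head;
		/* mark the value as consumed so new retirements are visible */
		ringbuf->last_retired_head = -1;
	}
}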
     126             : 
     127             : struct  intel_context;
     128             : struct drm_i915_reg_descriptor;
     129             : 
     130             : /*
      131             :  * We use a single page to load ctx workarounds, so all of these
      132             :  * values are expressed in terms of dwords.
      133             :  *
      134             :  * struct i915_wa_ctx_bb:
      135             :  *  offset: specifies the batch starting position; also helpful in case
      136             :  *    we want to have multiple batches at different offsets based on
      137             :  *    some criteria. It is not a requirement at the moment but provides
      138             :  *    an option for future use.
      139             :  *  size: size of the batch in dwords
     140             :  */
     141             : struct  i915_ctx_workarounds {
     142             :         struct i915_wa_ctx_bb {
     143             :                 u32 offset;
     144             :                 u32 size;
     145             :         } indirect_ctx, per_ctx;
     146             :         struct drm_i915_gem_object *obj;
     147             : };
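/*
 * Worked example with illustrative values: since the fields above are in
 * dwords, an indirect_ctx batch recorded as { .offset = 0x40, .size = 0x10 }
 * occupies bytes [0x40 * 4, (0x40 + 0x10) * 4) = [0x100, 0x140) of the
 * single workaround page.
 */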
     148             : 
     149             : struct  intel_engine_cs {
     150             :         const char      *name;
     151             :         enum intel_ring_id {
     152             :                 RCS = 0x0,
     153             :                 VCS,
     154             :                 BCS,
     155             :                 VECS,
     156             :                 VCS2
     157             :         } id;
     158             : #define I915_NUM_RINGS 5
     159             : #define LAST_USER_RING (VECS + 1)
     160             :         u32             mmio_base;
     161             :         struct          drm_device *dev;
     162             :         struct intel_ringbuffer *buffer;
     163             : 
     164             :         /*
     165             :          * A pool of objects to use as shadow copies of client batch buffers
     166             :          * when the command parser is enabled. Prevents the client from
     167             :          * modifying the batch contents after software parsing.
     168             :          */
     169             :         struct i915_gem_batch_pool batch_pool;
     170             : 
     171             :         struct intel_hw_status_page status_page;
     172             :         struct i915_ctx_workarounds wa_ctx;
     173             : 
     174             :         unsigned irq_refcount; /* protected by dev_priv->irq_lock */
     175             :         u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
     176             :         struct drm_i915_gem_request *trace_irq_req;
     177             :         bool __must_check (*irq_get)(struct intel_engine_cs *ring);
     178             :         void            (*irq_put)(struct intel_engine_cs *ring);
     179             : 
     180             :         int             (*init_hw)(struct intel_engine_cs *ring);
     181             : 
     182             :         int             (*init_context)(struct drm_i915_gem_request *req);
     183             : 
     184             :         void            (*write_tail)(struct intel_engine_cs *ring,
     185             :                                       u32 value);
     186             :         int __must_check (*flush)(struct drm_i915_gem_request *req,
     187             :                                   u32   invalidate_domains,
     188             :                                   u32   flush_domains);
     189             :         int             (*add_request)(struct drm_i915_gem_request *req);
     190             :         /* Some chipsets are not quite as coherent as advertised and need
     191             :          * an expensive kick to force a true read of the up-to-date seqno.
     192             :          * However, the up-to-date seqno is not always required and the last
     193             :          * seen value is good enough. Note that the seqno will always be
     194             :          * monotonic, even if not coherent.
     195             :          */
     196             :         u32             (*get_seqno)(struct intel_engine_cs *ring,
     197             :                                      bool lazy_coherency);
     198             :         void            (*set_seqno)(struct intel_engine_cs *ring,
     199             :                                      u32 seqno);
     200             :         int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
     201             :                                                u64 offset, u32 length,
     202             :                                                unsigned dispatch_flags);
     203             : #define I915_DISPATCH_SECURE 0x1
     204             : #define I915_DISPATCH_PINNED 0x2
     205             : #define I915_DISPATCH_RS     0x4
     206             :         void            (*cleanup)(struct intel_engine_cs *ring);
     207             : 
     208             :         /* GEN8 signal/wait table - never trust comments!
     209             :          *        signal to     signal to    signal to   signal to      signal to
     210             :          *          RCS            VCS          BCS        VECS          VCS2
     211             :          *      --------------------------------------------------------------------
     212             :          *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
     213             :          *      |-------------------------------------------------------------------
     214             :          *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
     215             :          *      |-------------------------------------------------------------------
     216             :          *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
     217             :          *      |-------------------------------------------------------------------
     218             :          * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
     219             :          *      |-------------------------------------------------------------------
     220             :          * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
     221             :          *      |-------------------------------------------------------------------
     222             :          *
     223             :          * Generalization:
     224             :          *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
      225             :          *  i.e. transpose of g(x, y)
     226             :          *
     227             :          *       sync from      sync from    sync from    sync from     sync from
     228             :          *          RCS            VCS          BCS        VECS          VCS2
     229             :          *      --------------------------------------------------------------------
     230             :          *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
     231             :          *      |-------------------------------------------------------------------
     232             :          *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
     233             :          *      |-------------------------------------------------------------------
     234             :          *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
     235             :          *      |-------------------------------------------------------------------
     236             :          * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
     237             :          *      |-------------------------------------------------------------------
     238             :          * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
     239             :          *      |-------------------------------------------------------------------
     240             :          *
     241             :          * Generalization:
     242             :          *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
      243             :          *  i.e. transpose of f(x, y)
     244             :          */
     245             :         struct {
     246             :                 u32     sync_seqno[I915_NUM_RINGS-1];
     247             : 
     248             :                 union {
     249             :                         struct {
     250             :                                 /* our mbox written by others */
     251             :                                 u32             wait[I915_NUM_RINGS];
     252             :                                 /* mboxes this ring signals to */
     253             :                                 u32             signal[I915_NUM_RINGS];
     254             :                         } mbox;
     255             :                         u64             signal_ggtt[I915_NUM_RINGS];
     256             :                 };
     257             : 
     258             :                 /* AKA wait() */
     259             :                 int     (*sync_to)(struct drm_i915_gem_request *to_req,
     260             :                                    struct intel_engine_cs *from,
     261             :                                    u32 seqno);
     262             :                 int     (*signal)(struct drm_i915_gem_request *signaller_req,
     263             :                                   /* num_dwords needed by caller */
     264             :                                   unsigned int num_dwords);
     265             :         } semaphore;
     266             : 
     267             :         /* Execlists */
     268             :         spinlock_t execlist_lock;
     269             :         struct list_head execlist_queue;
     270             :         struct list_head execlist_retired_req_list;
     271             :         u8 next_context_status_buffer;
     272             :         u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
     273             :         int             (*emit_request)(struct drm_i915_gem_request *request);
     274             :         int             (*emit_flush)(struct drm_i915_gem_request *request,
     275             :                                       u32 invalidate_domains,
     276             :                                       u32 flush_domains);
     277             :         int             (*emit_bb_start)(struct drm_i915_gem_request *req,
     278             :                                          u64 offset, unsigned dispatch_flags);
     279             : 
     280             :         /**
     281             :          * List of objects currently involved in rendering from the
     282             :          * ringbuffer.
     283             :          *
     284             :          * Includes buffers having the contents of their GPU caches
     285             :          * flushed, not necessarily primitives.  last_read_req
     286             :          * represents when the rendering involved will be completed.
     287             :          *
     288             :          * A reference is held on the buffer while on this list.
     289             :          */
     290             :         struct list_head active_list;
     291             : 
     292             :         /**
     293             :          * List of breadcrumbs associated with GPU requests currently
     294             :          * outstanding.
     295             :          */
     296             :         struct list_head request_list;
     297             : 
     298             :         /**
     299             :          * Seqno of request most recently submitted to request_list.
     300             :          * Used exclusively by hang checker to avoid grabbing lock while
     301             :          * inspecting request list.
     302             :          */
     303             :         u32 last_submitted_seqno;
     304             : 
     305             :         bool gpu_caches_dirty;
     306             : 
     307             :         wait_queue_head_t irq_queue;
     308             : 
     309             :         struct intel_context *default_context;
     310             :         struct intel_context *last_context;
     311             : 
     312             :         struct intel_ring_hangcheck hangcheck;
     313             : 
     314             :         struct {
     315             :                 struct drm_i915_gem_object *obj;
     316             :                 u32 gtt_offset;
     317             :                 volatile u32 *cpu_page;
     318             :         } scratch;
     319             : 
     320             :         bool needs_cmd_parser;
     321             : 
     322             :         /*
     323             :          * Table of commands the command parser needs to know about
     324             :          * for this ring.
     325             :          */
     326             :         DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
     327             : 
     328             :         /*
     329             :          * Table of registers allowed in commands that read/write registers.
     330             :          */
     331             :         const struct drm_i915_reg_descriptor *reg_table;
     332             :         int reg_count;
     333             : 
     334             :         /*
     335             :          * Table of registers allowed in commands that read/write registers, but
     336             :          * only from the DRM master.
     337             :          */
     338             :         const struct drm_i915_reg_descriptor *master_reg_table;
     339             :         int master_reg_count;
     340             : 
     341             :         /*
     342             :          * Returns the bitmask for the length field of the specified command.
     343             :          * Return 0 for an unrecognized/invalid command.
     344             :          *
     345             :          * If the command parser finds an entry for a command in the ring's
     346             :          * cmd_tables, it gets the command's length based on the table entry.
     347             :          * If not, it calls this function to determine the per-ring length field
     348             :          * encoding for the command (i.e. certain opcode ranges use certain bits
     349             :          * to encode the command length in the header).
     350             :          */
     351             :         u32 (*get_cmd_length_mask)(u32 cmd_header);
     352             : };
     353             : 
     354             : bool intel_ring_initialized(struct intel_engine_cs *ring);
     355             : 
     356             : static inline unsigned
     357           0 : intel_ring_flag(struct intel_engine_cs *ring)
     358             : {
     359           0 :         return 1 << ring->id;
     360             : }
     361             : 
     362             : static inline u32
     363           0 : intel_ring_sync_index(struct intel_engine_cs *ring,
     364             :                       struct intel_engine_cs *other)
     365             : {
     366             :         int idx;
     367             : 
     368             :         /*
     369             :          * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
     370             :          * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
      371             :          * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
     372             :          * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
     373             :          * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
     374             :          */
     375             : 
     376           0 :         idx = (other - ring) - 1;
     377           0 :         if (idx < 0)
     378           0 :                 idx += I915_NUM_RINGS;
     379             : 
     380           0 :         return idx;
     381             : }
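/*
 * Worked example (assuming the engines sit consecutively in dev_priv's ring
 * array, which is what the pointer subtraction above relies on): for
 * ring = vcs (index 1) and other = vecs (index 3), idx = (3 - 1) - 1 = 1,
 * matching "vcs -> 1 = vecs" above; for ring = bcs (2) and other = rcs (0),
 * idx = (0 - 2) - 1 = -3, then -3 + I915_NUM_RINGS = 2, matching
 * "bcs -> 2 = rcs".
 */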
     382             : 
     383             : static inline void
     384           0 : intel_flush_status_page(struct intel_engine_cs *ring, int reg)
     385             : {
     386           0 :         drm_clflush_virt_range(&ring->status_page.page_addr[reg],
     387             :                                sizeof(uint32_t));
     388           0 : }
     389             : 
     390             : static inline u32
     391           0 : intel_read_status_page(struct intel_engine_cs *ring,
     392             :                        int reg)
     393             : {
     394             :         /* Ensure that the compiler doesn't optimize away the load. */
     395           0 :         barrier();
     396           0 :         return ring->status_page.page_addr[reg];
     397             : }
     398             : 
     399             : static inline void
     400           0 : intel_write_status_page(struct intel_engine_cs *ring,
     401             :                         int reg, u32 value)
     402             : {
     403           0 :         ring->status_page.page_addr[reg] = value;
     404           0 : }
     405             : 
     406             : /**
     407             :  * Reads a dword out of the status page, which is written to from the command
     408             :  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
     409             :  * MI_STORE_DATA_IMM.
     410             :  *
     411             :  * The following dwords have a reserved meaning:
     412             :  * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
     413             :  * 0x04: ring 0 head pointer
     414             :  * 0x05: ring 1 head pointer (915-class)
     415             :  * 0x06: ring 2 head pointer (915-class)
     416             :  * 0x10-0x1b: Context status DWords (GM45)
     417             :  * 0x1f: Last written status offset. (GM45)
     418             :  * 0x20-0x2f: Reserved (Gen6+)
     419             :  *
     420             :  * The area from dword 0x30 to 0x3ff is available for driver usage.
     421             :  */
     422             : #define I915_GEM_HWS_INDEX              0x30
     423             : #define I915_GEM_HWS_SCRATCH_INDEX      0x40
     424             : #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
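/*
 * Usage sketch (an assumption about how these defines are consumed,
 * mirroring the get_seqno/add_request hooks): the most recently completed
 * seqno can be read back from the hardware status page at I915_GEM_HWS_INDEX.
 */
static inline u32
example_last_completed_seqno(struct intel_engine_cs *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}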
     425             : 
     426             : struct intel_ringbuffer *
     427             : intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
     428             : int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
     429             :                                      struct intel_ringbuffer *ringbuf);
     430             : void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
     431             : void intel_ringbuffer_free(struct intel_ringbuffer *ring);
     432             : 
     433             : void intel_stop_ring_buffer(struct intel_engine_cs *ring);
     434             : void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
     435             : 
     436             : int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
     437             : 
     438             : int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
     439             : int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
     440           0 : static inline void intel_ring_emit(struct intel_engine_cs *ring,
     441             :                                    u32 data)
     442             : {
     443           0 :         struct intel_ringbuffer *ringbuf = ring->buffer;
     444           0 :         iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
     445           0 :         ringbuf->tail += 4;
     446           0 : }
     447           0 : static inline void intel_ring_advance(struct intel_engine_cs *ring)
     448             : {
     449           0 :         struct intel_ringbuffer *ringbuf = ring->buffer;
     450           0 :         ringbuf->tail &= ringbuf->size - 1;
     451           0 : }
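/*
 * Usage sketch (hypothetical, and assuming the request's ring back-pointer
 * and the MI_NOOP opcode defined elsewhere in the driver): the canonical
 * emit pattern is to reserve space with intel_ring_begin(), write dwords
 * with intel_ring_emit(), then let intel_ring_advance() wrap the tail.
 */
static inline int
example_emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);		/* make room for 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* mask tail back into the ring */

	return 0;
}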
     452             : int __intel_ring_space(int head, int tail, int size);
     453             : void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
     454             : int intel_ring_space(struct intel_ringbuffer *ringbuf);
     455             : bool intel_ring_stopped(struct intel_engine_cs *ring);
     456             : 
     457             : int __must_check intel_ring_idle(struct intel_engine_cs *ring);
     458             : void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
     459             : int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
     460             : int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
     461             : 
     462             : void intel_fini_pipe_control(struct intel_engine_cs *ring);
     463             : int intel_init_pipe_control(struct intel_engine_cs *ring);
     464             : 
     465             : int intel_init_render_ring_buffer(struct drm_device *dev);
     466             : int intel_init_bsd_ring_buffer(struct drm_device *dev);
     467             : int intel_init_bsd2_ring_buffer(struct drm_device *dev);
     468             : int intel_init_blt_ring_buffer(struct drm_device *dev);
     469             : int intel_init_vebox_ring_buffer(struct drm_device *dev);
     470             : 
     471             : u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
     472             : 
     473             : int init_workarounds_ring(struct intel_engine_cs *ring);
     474             : 
     475           0 : static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
     476             : {
     477           0 :         return ringbuf->tail;
     478             : }
     479             : 
     480             : /*
     481             :  * Arbitrary size for largest possible 'add request' sequence. The code paths
     482             :  * are complex and variable. Empirical measurement shows that the worst case
      483             :  * is ILK at 136 words. Reserving too much is better than reserving too little,
      484             :  * as it allows for corner cases that might have been missed. So the figure
     485             :  * has been rounded up to 160 words.
     486             :  */
     487             : #define MIN_SPACE_FOR_ADD_REQUEST       160
     488             : 
     489             : /*
     490             :  * Reserve space in the ring to guarantee that the i915_add_request() call
     491             :  * will always have sufficient room to do its stuff. The request creation
     492             :  * code calls this automatically.
     493             :  */
     494             : void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
     495             : /* Cancel the reservation, e.g. because the request is being discarded. */
     496             : void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
     497             : /* Use the reserved space - for use by i915_add_request() only. */
     498             : void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
     499             : /* Finish with the reserved space - for use by i915_add_request() only. */
     500             : void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
     501             : 
     502             : /* Legacy ringbuffer specific portion of reservation code: */
     503             : int intel_ring_reserve_space(struct drm_i915_gem_request *request);
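/*
 * Lifecycle sketch, inferred from the comments above (the exact ordering is
 * an assumption, not quoted from the driver):
 *
 *	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *	... emit the request's own commands ...
 *	intel_ring_reserved_space_use(ringbuf);      (inside i915_add_request())
 *	... emit the breadcrumb/flush commands ...
 *	intel_ring_reserved_space_end(ringbuf);      (inside i915_add_request())
 *
 * and if the request is discarded instead:
 *
 *	intel_ring_reserved_space_cancel(ringbuf);
 */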
     504             : 
     505             : #endif /* _INTEL_RINGBUFFER_H_ */

Generated by: LCOV version 1.13