Line data Source code
1 : /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 : */
3 : /*
4 : * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 : * All Rights Reserved.
6 : *
7 : * Permission is hereby granted, free of charge, to any person obtaining a
8 : * copy of this software and associated documentation files (the
9 : * "Software"), to deal in the Software without restriction, including
10 : * without limitation the rights to use, copy, modify, merge, publish,
11 : * distribute, sub license, and/or sell copies of the Software, and to
12 : * permit persons to whom the Software is furnished to do so, subject to
13 : * the following conditions:
14 : *
15 : * The above copyright notice and this permission notice (including the
16 : * next paragraph) shall be included in all copies or substantial portions
17 : * of the Software.
18 : *
19 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 : * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 : * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 : * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 : * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 : * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 : * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 : *
27 : */
28 :
29 : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 :
31 : #ifdef __linux__
32 : #include <linux/async.h>
33 : #endif
34 : #include <dev/pci/drm/drmP.h>
35 : #include <dev/pci/drm/drm_crtc_helper.h>
36 : #include <dev/pci/drm/drm_fb_helper.h>
37 : #ifdef __linux__
38 : #include <drm/drm_legacy.h>
39 : #endif
40 : #include "intel_drv.h"
41 : #include <dev/pci/drm/i915_drm.h>
42 : #include "i915_drv.h"
43 : #include "i915_vgpu.h"
44 : #include "i915_trace.h"
45 : #ifdef __linux__
46 : #include <linux/pci.h>
47 : #include <linux/console.h>
48 : #include <linux/vt.h>
49 : #include <linux/vgaarb.h>
50 : #include <linux/acpi.h>
51 : #include <linux/pnp.h>
52 : #include <linux/vga_switcheroo.h>
53 : #include <linux/slab.h>
54 : #include <acpi/video.h>
55 : #include <linux/pm.h>
56 : #include <linux/pm_runtime.h>
57 : #include <linux/oom.h>
58 : #endif
59 :
60 :
61 0 : static int i915_getparam(struct drm_device *dev, void *data,
62 : struct drm_file *file_priv)
63 : {
64 0 : struct drm_i915_private *dev_priv = dev->dev_private;
65 0 : drm_i915_getparam_t *param = data;
66 0 : int value;
67 :
68 0 : switch (param->param) {
69 : case I915_PARAM_IRQ_ACTIVE:
70 : case I915_PARAM_ALLOW_BATCHBUFFER:
71 : case I915_PARAM_LAST_DISPATCH:
72 : /* Reject all old ums/dri params. */
73 0 : return -ENODEV;
74 : case I915_PARAM_CHIPSET_ID:
75 0 : value = dev->pdev->device;
76 0 : break;
77 : case I915_PARAM_REVISION:
78 0 : value = dev->pdev->revision;
79 0 : break;
80 : case I915_PARAM_HAS_GEM:
81 0 : value = 1;
82 0 : break;
83 : case I915_PARAM_NUM_FENCES_AVAIL:
84 0 : value = dev_priv->num_fence_regs;
85 0 : break;
86 : case I915_PARAM_HAS_OVERLAY:
87 0 : value = dev_priv->overlay ? 1 : 0;
88 0 : break;
89 : case I915_PARAM_HAS_PAGEFLIPPING:
90 0 : value = 1;
91 0 : break;
92 : case I915_PARAM_HAS_EXECBUF2:
93 : /* depends on GEM */
94 0 : value = 1;
95 0 : break;
96 : case I915_PARAM_HAS_BSD:
97 0 : value = intel_ring_initialized(&dev_priv->ring[VCS]);
98 0 : break;
99 : case I915_PARAM_HAS_BLT:
100 0 : value = intel_ring_initialized(&dev_priv->ring[BCS]);
101 0 : break;
102 : case I915_PARAM_HAS_VEBOX:
103 0 : value = intel_ring_initialized(&dev_priv->ring[VECS]);
104 0 : break;
105 : case I915_PARAM_HAS_BSD2:
106 0 : value = intel_ring_initialized(&dev_priv->ring[VCS2]);
107 0 : break;
108 : case I915_PARAM_HAS_RELAXED_FENCING:
109 0 : value = 1;
110 0 : break;
111 : case I915_PARAM_HAS_COHERENT_RINGS:
112 0 : value = 1;
113 0 : break;
114 : case I915_PARAM_HAS_EXEC_CONSTANTS:
115 0 : value = INTEL_INFO(dev)->gen >= 4;
116 0 : break;
117 : case I915_PARAM_HAS_RELAXED_DELTA:
118 0 : value = 1;
119 0 : break;
120 : case I915_PARAM_HAS_GEN7_SOL_RESET:
121 0 : value = 1;
122 0 : break;
123 : case I915_PARAM_HAS_LLC:
124 0 : value = HAS_LLC(dev);
125 0 : break;
126 : case I915_PARAM_HAS_WT:
127 0 : value = HAS_WT(dev);
128 0 : break;
129 : case I915_PARAM_HAS_ALIASING_PPGTT:
130 0 : value = USES_PPGTT(dev);
131 0 : break;
132 : case I915_PARAM_HAS_WAIT_TIMEOUT:
133 0 : value = 1;
134 0 : break;
135 : case I915_PARAM_HAS_SEMAPHORES:
136 0 : value = i915_semaphore_is_enabled(dev);
137 0 : break;
138 : case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
139 0 : value = 1;
140 0 : break;
141 : case I915_PARAM_HAS_SECURE_BATCHES:
142 0 : value = capable(CAP_SYS_ADMIN);
143 0 : break;
144 : case I915_PARAM_HAS_PINNED_BATCHES:
145 0 : value = 1;
146 0 : break;
147 : case I915_PARAM_HAS_EXEC_NO_RELOC:
148 0 : value = 1;
149 0 : break;
150 : case I915_PARAM_HAS_EXEC_HANDLE_LUT:
151 0 : value = 1;
152 0 : break;
153 : case I915_PARAM_CMD_PARSER_VERSION:
154 0 : value = i915_cmd_parser_get_version();
155 0 : break;
156 : case I915_PARAM_HAS_COHERENT_PHYS_GTT:
157 0 : value = 1;
158 0 : break;
159 : case I915_PARAM_MMAP_VERSION:
160 0 : value = 1;
161 0 : break;
162 : case I915_PARAM_SUBSLICE_TOTAL:
163 0 : value = INTEL_INFO(dev)->subslice_total;
164 0 : if (!value)
165 0 : return -ENODEV;
166 : break;
167 : case I915_PARAM_EU_TOTAL:
168 0 : value = INTEL_INFO(dev)->eu_total;
169 0 : if (!value)
170 0 : return -ENODEV;
171 : break;
172 : case I915_PARAM_HAS_GPU_RESET:
173 0 : value = i915.enable_hangcheck &&
174 0 : intel_has_gpu_reset(dev);
175 0 : break;
176 : case I915_PARAM_HAS_RESOURCE_STREAMER:
177 0 : value = HAS_RESOURCE_STREAMER(dev);
178 0 : break;
179 : default:
180 : DRM_DEBUG("Unknown parameter %d\n", param->param);
181 0 : return -EINVAL;
182 : }
183 :
184 0 : if (copy_to_user(param->value, &value, sizeof(int))) {
185 0 : DRM_ERROR("copy_to_user failed\n");
186 0 : return -EFAULT;
187 : }
188 :
189 0 : return 0;
190 0 : }
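
/*
 * Illustrative sketch (not part of this file): how a userspace client
 * might exercise the getparam ioctl above through libdrm.  The fd and
 * the chosen parameter are hypothetical.
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &value };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("chipset id: 0x%04x\n", value);
 */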
191 :
192 : #ifdef __linux__
193 : static int i915_get_bridge_dev(struct drm_device *dev)
194 : {
195 : struct drm_i915_private *dev_priv = dev->dev_private;
196 :
197 : dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
198 : if (!dev_priv->bridge_dev) {
199 : DRM_ERROR("bridge device not found\n");
200 : return -1;
201 : }
202 : return 0;
203 : }
204 : #else
205 0 : int i915_get_bridge_dev(struct drm_device *dev)
206 : {
207 0 : struct drm_i915_private *dev_priv = dev->dev_private;
208 :
209 0 : dev_priv->bridge_dev = malloc(sizeof(*dev_priv->bridge_dev),
210 : M_DEVBUF, M_WAITOK);
211 0 : dev_priv->bridge_dev->pc = dev->pdev->pc;
212 0 : dev_priv->bridge_dev->tag = pci_make_tag(dev->pdev->pc, 0, 0, 0);
213 0 : return 0;
214 : }
215 : #endif
216 :
217 : #define MCHBAR_I915 0x44
218 : #define MCHBAR_I965 0x48
219 : #define MCHBAR_SIZE (4*4096)
220 :
221 : #define DEVEN_REG 0x54
222 : #define DEVEN_MCHBAR_EN (1 << 28)
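
/*
 * Layout notes, derived from the code below: on gen4+ the MCHBAR base is
 * a 64-bit value split across config dwords 0x48/0x4c, while pre-gen4
 * parts use a single dword at 0x44.  Bit 0 of the low dword is the
 * enable bit, except on 915G/GM where DEVEN bit 28 enables it instead.
 */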
223 :
224 : /* Allocate space for the MCH regs if needed, return nonzero on error */
225 : static int
226 0 : intel_alloc_mchbar_resource(struct drm_device *dev)
227 : {
228 0 : struct drm_i915_private *dev_priv = dev->dev_private;
229 0 : int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
230 0 : u32 temp_lo, temp_hi = 0;
231 : u64 mchbar_addr;
232 : #ifdef __linux__
233 : int ret;
234 : #endif
235 :
236 0 : if (INTEL_INFO(dev)->gen >= 4)
237 0 : pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
238 0 : pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
239 0 : mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
240 :
241 : #ifdef __linux__
242 : /* If ACPI doesn't have it, assume we need to allocate it ourselves */
243 : #ifdef CONFIG_PNP
244 : if (mchbar_addr &&
245 : pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
246 : return 0;
247 : #endif
248 : #else
249 0 : if (mchbar_addr)
250 0 : return 0;
251 : #endif
252 :
253 : /* Get some space for it */
254 : #ifdef __linux__
255 : dev_priv->mch_res.name = "i915 MCHBAR";
256 : dev_priv->mch_res.flags = IORESOURCE_MEM;
257 : ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
258 : &dev_priv->mch_res,
259 : MCHBAR_SIZE, MCHBAR_SIZE,
260 : PCIBIOS_MIN_MEM,
261 : 0, pcibios_align_resource,
262 : dev_priv->bridge_dev);
263 : if (ret) {
264 : DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
265 : dev_priv->mch_res.start = 0;
266 : return ret;
267 : }
268 : #else
269 0 : if (dev_priv->memex == NULL || extent_alloc(dev_priv->memex,
270 : MCHBAR_SIZE, MCHBAR_SIZE, 0, 0, 0, &dev_priv->mch_res.start)) {
271 0 : return -ENOMEM;
272 : }
273 : #endif
274 :
275 0 : if (INTEL_INFO(dev)->gen >= 4)
276 0 : pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
277 0 : upper_32_bits(dev_priv->mch_res.start));
278 :
279 0 : pci_write_config_dword(dev_priv->bridge_dev, reg,
280 0 : lower_32_bits(dev_priv->mch_res.start));
281 0 : return 0;
282 0 : }
283 :
284 : /* Set up MCHBAR if possible; flag mchbar_need_disable if we must disable it again on teardown */
285 : static void
286 0 : intel_setup_mchbar(struct drm_device *dev)
287 : {
288 0 : struct drm_i915_private *dev_priv = dev->dev_private;
289 0 : int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
290 0 : u32 temp;
291 : bool enabled;
292 :
293 0 : if (IS_VALLEYVIEW(dev))
294 0 : return;
295 :
296 0 : dev_priv->mchbar_need_disable = false;
297 :
298 0 : if (IS_I915G(dev) || IS_I915GM(dev)) {
299 0 : pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
300 0 : enabled = !!(temp & DEVEN_MCHBAR_EN);
301 0 : } else {
302 0 : pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
303 0 : enabled = temp & 1;
304 : }
305 :
306 : /* If it's already enabled, don't have to do anything */
307 0 : if (enabled)
308 0 : return;
309 :
310 0 : if (intel_alloc_mchbar_resource(dev))
311 0 : return;
312 :
313 0 : dev_priv->mchbar_need_disable = true;
314 :
315 : /* Space is allocated or reserved, so enable it. */
316 0 : if (IS_I915G(dev) || IS_I915GM(dev)) {
317 0 : pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
318 0 : temp | DEVEN_MCHBAR_EN);
319 0 : } else {
320 0 : pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
321 0 : pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
322 : }
323 0 : }
324 :
325 : static void
326 0 : intel_teardown_mchbar(struct drm_device *dev)
327 : {
328 0 : struct drm_i915_private *dev_priv = dev->dev_private;
329 0 : int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
330 0 : u32 temp;
331 :
332 0 : if (dev_priv->mchbar_need_disable) {
333 0 : if (IS_I915G(dev) || IS_I915GM(dev)) {
334 0 : pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
335 0 : temp &= ~DEVEN_MCHBAR_EN;
336 0 : pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
337 0 : } else {
338 0 : pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
339 0 : temp &= ~1;
340 0 : pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
341 : }
342 : }
343 :
344 0 : if (dev_priv->mch_res.start)
345 : #ifdef __linux__
346 : release_resource(&dev_priv->mch_res);
347 : #else
348 0 : extent_free(dev_priv->memex, dev_priv->mch_res.start,
349 : MCHBAR_SIZE, 0);
350 : #endif
351 0 : }
352 :
353 : #ifdef __linux__
354 : /* true = enable VGA decode, false = disable VGA decode */
355 : static unsigned int i915_vga_set_decode(void *cookie, bool state)
356 : {
357 : struct drm_device *dev = cookie;
358 :
359 : intel_modeset_vga_set_state(dev, state);
360 : if (state)
361 : return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
362 : VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
363 : else
364 : return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
365 : }
366 :
367 : static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
368 : {
369 : struct drm_device *dev = pci_get_drvdata(pdev);
370 : pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
371 :
372 : if (state == VGA_SWITCHEROO_ON) {
373 : pr_info("switched on\n");
374 : dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
375 : /* i915 resume handler doesn't set to D0 */
376 : pci_set_power_state(dev->pdev, PCI_D0);
377 : i915_resume_switcheroo(dev);
378 : dev->switch_power_state = DRM_SWITCH_POWER_ON;
379 : } else {
380 : pr_err("switched off\n");
381 : dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
382 : i915_suspend_switcheroo(dev, pmm);
383 : dev->switch_power_state = DRM_SWITCH_POWER_OFF;
384 : }
385 : }
386 :
387 : static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
388 : {
389 : struct drm_device *dev = pci_get_drvdata(pdev);
390 :
391 : /*
392 : * FIXME: open_count is protected by drm_global_mutex but that would lead to
393 : * locking inversion with the driver load path. And the access here is
394 : * completely racy anyway. So don't bother with locking for now.
395 : */
396 : return dev->open_count == 0;
397 : }
398 :
399 : static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
400 : .set_gpu_state = i915_switcheroo_set_state,
401 : .reprobe = NULL,
402 : .can_switch = i915_switcheroo_can_switch,
403 : };
404 : #else
405 : #define i915_vga_set_decode NULL
406 : #endif
407 :
408 0 : static int i915_load_modeset_init(struct drm_device *dev)
409 : {
410 0 : struct drm_i915_private *dev_priv = dev->dev_private;
411 : int ret;
412 :
413 0 : ret = intel_parse_bios(dev);
414 : if (ret)
415 : DRM_INFO("failed to find VBIOS tables\n");
416 :
417 : /* If we have more than one VGA card, then we need to arbitrate access
418 : * to the common VGA resources.
419 : *
420 : * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
421 : * then we do not take part in VGA arbitration and
422 : * vga_client_register() fails with -ENODEV.
423 : */
424 0 : ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
425 0 : if (ret && ret != -ENODEV)
426 : goto out;
427 :
428 : #ifdef __linux__
429 : intel_register_dsm_handler();
430 : #endif
431 :
432 : ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
433 0 : if (ret)
434 : goto cleanup_vga_client;
435 :
436 : /* Initialise stolen first so that we may reserve preallocated
437 : * objects for the BIOS to KMS transition.
438 : */
439 0 : ret = i915_gem_init_stolen(dev);
440 0 : if (ret)
441 : goto cleanup_vga_switcheroo;
442 :
443 0 : intel_power_domains_init_hw(dev_priv);
444 :
445 0 : ret = intel_irq_install(dev_priv);
446 0 : if (ret)
447 : goto cleanup_gem_stolen;
448 :
449 0 : intel_setup_gmbus(dev);
450 :
451 : /* Important: The output setup functions called by modeset_init need
452 : * working irqs for e.g. gmbus and dp aux transfers. */
453 0 : intel_modeset_init(dev);
454 :
455 0 : intel_guc_ucode_init(dev);
456 :
457 0 : ret = i915_gem_init(dev);
458 0 : if (ret)
459 : goto cleanup_irq;
460 :
461 0 : intel_modeset_gem_init(dev);
462 :
463 : /* Always safe in the mode setting case. */
464 : /* FIXME: do pre/post-mode set stuff in core KMS code */
465 0 : dev->vblank_disable_allowed = true;
466 0 : if (INTEL_INFO(dev)->num_pipes == 0)
467 0 : return 0;
468 :
469 0 : ret = intel_fbdev_init(dev);
470 0 : if (ret)
471 : goto cleanup_gem;
472 :
473 : /* Only enable hotplug handling once the fbdev is fully set up. */
474 0 : intel_hpd_init(dev_priv);
475 :
476 : /*
477 : * Some ports require correctly set-up hpd registers for detection to
478 : * work properly (otherwise we see ghost connected connector status), e.g. VGA
479 : * on gm45. Hence we can only set up the initial fbdev config after hpd
480 : * irqs are fully enabled. Now we should scan for the initial config
481 : * only once hotplug handling is enabled, but due to screwed-up locking
482 : * around kms/fbdev init we can't protect the fbdev initial config
483 : * scanning against hotplug events. Hence do this first and ignore the
484 : * tiny window where we will lose hotplug notifications.
485 : */
486 0 : async_schedule(intel_fbdev_initial_config, dev_priv);
487 :
488 0 : drm_kms_helper_poll_init(dev);
489 :
490 0 : return 0;
491 :
492 : cleanup_gem:
493 0 : mutex_lock(&dev->struct_mutex);
494 0 : i915_gem_cleanup_ringbuffer(dev);
495 0 : i915_gem_context_fini(dev);
496 0 : mutex_unlock(&dev->struct_mutex);
497 : cleanup_irq:
498 0 : intel_guc_ucode_fini(dev);
499 0 : drm_irq_uninstall(dev);
500 0 : intel_teardown_gmbus(dev);
501 : cleanup_gem_stolen:
502 0 : i915_gem_cleanup_stolen(dev);
503 : cleanup_vga_switcheroo:
504 : vga_switcheroo_unregister_client(dev->pdev);
505 : cleanup_vga_client:
506 0 : vga_client_register(dev->pdev, NULL, NULL, NULL);
507 : out:
508 0 : return ret;
509 0 : }
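
/*
 * Note on the error path above: the cleanup labels run in the reverse
 * order of the setup steps, so a failure at any stage unwinds exactly
 * what had been brought up to that point.
 */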
510 :
511 : #if IS_ENABLED(CONFIG_FB)
512 : static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
513 : {
514 : struct apertures_struct *ap;
515 : struct pci_dev *pdev = dev_priv->dev->pdev;
516 : bool primary;
517 : int ret;
518 :
519 : ap = alloc_apertures(1);
520 : if (!ap)
521 : return -ENOMEM;
522 :
523 : ap->ranges[0].base = dev_priv->gtt.mappable_base;
524 : ap->ranges[0].size = dev_priv->gtt.mappable_end;
525 :
526 : primary =
527 : pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
528 :
529 : ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
530 :
531 : kfree(ap);
532 :
533 : return ret;
534 : }
535 : #else
536 0 : static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
537 : {
538 0 : return 0;
539 : }
540 : #endif
541 :
542 : #if !defined(CONFIG_VGA_CONSOLE)
543 0 : static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
544 : {
545 0 : return 0;
546 : }
547 : #elif !defined(CONFIG_DUMMY_CONSOLE)
548 : static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
549 : {
550 : return -ENODEV;
551 : }
552 : #else
553 : static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
554 : {
555 : int ret = 0;
556 :
557 : DRM_INFO("Replacing VGA console driver\n");
558 :
559 : console_lock();
560 : if (con_is_bound(&vga_con))
561 : ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
562 : if (ret == 0) {
563 : ret = do_unregister_con_driver(&vga_con);
564 :
565 : /* Ignore "already unregistered". */
566 : if (ret == -ENODEV)
567 : ret = 0;
568 : }
569 : console_unlock();
570 :
571 : return ret;
572 : }
573 : #endif
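
/*
 * Summary of the three variants above: with no VGA console compiled in
 * there is nothing to kick out; with vgacon but no dummy console there is
 * nothing to hand the console off to, so the load must fail; otherwise
 * dummy_con takes over every console bound to vga_con before vga_con is
 * unregistered.
 */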
574 :
575 : #ifdef __linux__
576 : static void i915_dump_device_info(struct drm_i915_private *dev_priv)
577 : {
578 : const struct intel_device_info *info = &dev_priv->info;
579 :
580 : #define PRINT_S(name) "%s"
581 : #define SEP_EMPTY
582 : #define PRINT_FLAG(name) info->name ? #name "," : ""
583 : #define SEP_COMMA ,
584 : DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
585 : DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
586 : info->gen,
587 : dev_priv->dev->pdev->device,
588 : dev_priv->dev->pdev->revision,
589 : DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
590 : #undef PRINT_S
591 : #undef SEP_EMPTY
592 : #undef PRINT_FLAG
593 : #undef SEP_COMMA
594 : }
595 : #endif
596 :
597 0 : static void cherryview_sseu_info_init(struct drm_device *dev)
598 : {
599 0 : struct drm_i915_private *dev_priv = dev->dev_private;
600 : struct intel_device_info *info;
601 : u32 fuse, eu_dis;
602 :
603 0 : info = (struct intel_device_info *)&dev_priv->info;
604 0 : fuse = I915_READ(CHV_FUSE_GT);
605 :
606 0 : info->slice_total = 1;
607 :
608 0 : if (!(fuse & CHV_FGT_DISABLE_SS0)) {
609 0 : info->subslice_per_slice++;
610 0 : eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
611 : CHV_FGT_EU_DIS_SS0_R1_MASK);
612 0 : info->eu_total += 8 - hweight32(eu_dis);
613 0 : }
614 :
615 0 : if (!(fuse & CHV_FGT_DISABLE_SS1)) {
616 0 : info->subslice_per_slice++;
617 0 : eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
618 : CHV_FGT_EU_DIS_SS1_R1_MASK);
619 0 : info->eu_total += 8 - hweight32(eu_dis);
620 0 : }
621 :
622 0 : info->subslice_total = info->subslice_per_slice;
623 : /*
624 : * CHV is expected to always have a uniform distribution of EU
625 : * across subslices.
626 : */
627 0 : info->eu_per_subslice = info->subslice_total ?
628 0 : info->eu_total / info->subslice_total :
629 : 0;
630 : /*
631 : * CHV supports subslice power gating on devices with more than
632 : * one subslice, and supports EU power gating on devices with
633 : * more than one EU pair per subslice.
634 : */
635 0 : info->has_slice_pg = 0;
636 0 : info->has_subslice_pg = (info->subslice_total > 1);
637 0 : info->has_eu_pg = (info->eu_per_subslice > 2);
638 0 : }
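
/*
 * Worked example of the fuse math above, for a hypothetical fuse value:
 * if SS0 is enabled and its two EU-disable fields have three bits set in
 * total, hweight32(eu_dis) == 3 and that subslice contributes
 * 8 - 3 = 5 EUs to eu_total.
 */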
639 :
640 0 : static void gen9_sseu_info_init(struct drm_device *dev)
641 : {
642 0 : struct drm_i915_private *dev_priv = dev->dev_private;
643 : struct intel_device_info *info;
644 : int s_max = 3, ss_max = 4, eu_max = 8;
645 : int s, ss;
646 : u32 fuse2, s_enable, ss_disable, eu_disable;
647 : u8 eu_mask = 0xff;
648 :
649 0 : info = (struct intel_device_info *)&dev_priv->info;
650 0 : fuse2 = I915_READ(GEN8_FUSE2);
651 0 : s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
652 : GEN8_F2_S_ENA_SHIFT;
653 0 : ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
654 : GEN9_F2_SS_DIS_SHIFT;
655 :
656 0 : info->slice_total = hweight32(s_enable);
657 : /*
658 : * The subslice disable field is global, i.e. it applies
659 : * to each of the enabled slices.
660 : */
661 0 : info->subslice_per_slice = ss_max - hweight32(ss_disable);
662 0 : info->subslice_total = info->slice_total *
663 0 : info->subslice_per_slice;
664 :
665 : /*
666 : * Iterate through enabled slices and subslices to
667 : * count the total enabled EU.
668 : */
669 0 : for (s = 0; s < s_max; s++) {
670 0 : if (!(s_enable & (0x1 << s)))
671 : /* skip disabled slice */
672 : continue;
673 :
674 0 : eu_disable = I915_READ(GEN9_EU_DISABLE(s));
675 0 : for (ss = 0; ss < ss_max; ss++) {
676 : int eu_per_ss;
677 :
678 0 : if (ss_disable & (0x1 << ss))
679 : /* skip disabled subslice */
680 0 : continue;
681 :
682 0 : eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
683 : eu_mask);
684 :
685 : /*
686 : * Record which subslice(s) have 7 EUs. We
687 : * can tune the hash used to spread work among
688 : * subslices if they are unbalanced.
689 : */
690 0 : if (eu_per_ss == 7)
691 0 : info->subslice_7eu[s] |= 1 << ss;
692 :
693 0 : info->eu_total += eu_per_ss;
694 0 : }
695 : }
696 :
697 : /*
698 : * SKL is expected to always have a uniform distribution
699 : * of EU across subslices with the exception that any one
700 : * EU in any one subslice may be fused off for die
701 : * recovery. BXT is expected to be perfectly uniform in EU
702 : * distribution.
703 : */
704 0 : info->eu_per_subslice = info->subslice_total ?
705 0 : DIV_ROUND_UP(info->eu_total,
706 : info->subslice_total) : 0;
707 : /*
708 : * SKL supports slice power gating on devices with more than
709 : * one slice, and supports EU power gating on devices with
710 : * more than one EU pair per subslice. BXT supports subslice
711 : * power gating on devices with more than one subslice, and
712 : * supports EU power gating on devices with more than one EU
713 : * pair per subslice.
714 : */
715 0 : info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
716 0 : (info->slice_total > 1));
717 0 : info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
718 0 : info->has_eu_pg = (info->eu_per_subslice > 2);
719 0 : }
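
/*
 * Example of the rounding above, with hypothetical counts: 23 EUs spread
 * over 3 subslices (e.g. 8 + 8 + 7, one EU fused off for die recovery)
 * gives DIV_ROUND_UP(23, 3) == 8 for eu_per_subslice.
 */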
720 :
721 0 : static void broadwell_sseu_info_init(struct drm_device *dev)
722 : {
723 0 : struct drm_i915_private *dev_priv = dev->dev_private;
724 : struct intel_device_info *info;
725 : const int s_max = 3, ss_max = 3, eu_max = 8;
726 : int s, ss;
727 0 : u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
728 :
729 0 : fuse2 = I915_READ(GEN8_FUSE2);
730 0 : s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
731 0 : ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
732 :
733 0 : eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
734 0 : eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
735 0 : ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
736 : (32 - GEN8_EU_DIS0_S1_SHIFT));
737 0 : eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
738 0 : ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
739 : (32 - GEN8_EU_DIS1_S2_SHIFT));
740 :
741 :
742 0 : info = (struct intel_device_info *)&dev_priv->info;
743 0 : info->slice_total = hweight32(s_enable);
744 :
745 : /*
746 : * The subslice disable field is global, i.e. it applies
747 : * to each of the enabled slices.
748 : */
749 0 : info->subslice_per_slice = ss_max - hweight32(ss_disable);
750 0 : info->subslice_total = info->slice_total * info->subslice_per_slice;
751 :
752 : /*
753 : * Iterate through enabled slices and subslices to
754 : * count the total enabled EU.
755 : */
756 0 : for (s = 0; s < s_max; s++) {
757 0 : if (!(s_enable & (0x1 << s)))
758 : /* skip disabled slice */
759 : continue;
760 :
761 0 : for (ss = 0; ss < ss_max; ss++) {
762 : u32 n_disabled;
763 :
764 0 : if (ss_disable & (0x1 << ss))
765 : /* skip disabled subslice */
766 0 : continue;
767 :
768 0 : n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
769 :
770 : /*
771 : * Record which subslices have 7 EUs.
772 : */
773 0 : if (eu_max - n_disabled == 7)
774 0 : info->subslice_7eu[s] |= 1 << ss;
775 :
776 0 : info->eu_total += eu_max - n_disabled;
777 0 : }
778 : }
779 :
780 : /*
781 : * BDW is expected to always have a uniform distribution of EU across
782 : * subslices with the exception that any one EU in any one subslice may
783 : * be fused off for die recovery.
784 : */
785 0 : info->eu_per_subslice = info->subslice_total ?
786 0 : DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
787 :
788 : /*
789 : * BDW supports slice power gating on devices with more than
790 : * one slice.
791 : */
792 0 : info->has_slice_pg = (info->slice_total > 1);
793 0 : info->has_subslice_pg = 0;
794 0 : info->has_eu_pg = 0;
795 0 : }
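
/*
 * Example of the per-subslice window above, for a hypothetical fuse
 * value: with eu_max == 8, subslice ss owns bits [ss*8, ss*8+7] of
 * eu_disable[s]; if that byte has a single bit set, hweight8() == 1 and
 * the subslice contributes 8 - 1 = 7 EUs (and lands in subslice_7eu[s]).
 */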
796 :
797 : /*
798 : * Determine various intel_device_info fields at runtime.
799 : *
800 : * Use it when either:
801 : * - it's judged too laborious to fill n static structures with the limit
802 : * when a simple if statement does the job,
803 : * - run-time checks (eg read fuse/strap registers) are needed.
804 : *
805 : * This function needs to be called:
806 : * - after the MMIO has been setup as we are reading registers,
807 : * - after the PCH has been detected,
808 : * - before the first usage of the fields it can tweak.
809 : */
810 0 : static void intel_device_info_runtime_init(struct drm_device *dev)
811 : {
812 0 : struct drm_i915_private *dev_priv = dev->dev_private;
813 : struct intel_device_info *info;
814 : enum pipe pipe;
815 :
816 0 : info = (struct intel_device_info *)&dev_priv->info;
817 :
818 : /*
819 : * Skylake and Broxton currently don't expose the topmost plane as its
820 : * use is exclusive with the legacy cursor and we only want to expose
821 : * one of those, not both. Until we can safely expose the topmost plane
822 : * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
823 : * we don't expose the topmost plane at all to prevent ABI breakage
824 : * down the line.
825 : */
826 0 : if (IS_BROXTON(dev)) {
827 0 : info->num_sprites[PIPE_A] = 2;
828 0 : info->num_sprites[PIPE_B] = 2;
829 0 : info->num_sprites[PIPE_C] = 1;
830 0 : } else if (IS_VALLEYVIEW(dev))
831 0 : for_each_pipe(dev_priv, pipe)
832 0 : info->num_sprites[pipe] = 2;
833 : else
834 0 : for_each_pipe(dev_priv, pipe)
835 0 : info->num_sprites[pipe] = 1;
836 :
837 0 : if (i915.disable_display) {
838 : DRM_INFO("Display disabled (module parameter)\n");
839 0 : info->num_pipes = 0;
840 0 : } else if (info->num_pipes > 0 &&
841 0 : (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
842 0 : !IS_VALLEYVIEW(dev)) {
843 0 : u32 fuse_strap = I915_READ(FUSE_STRAP);
844 0 : u32 sfuse_strap = I915_READ(SFUSE_STRAP);
845 :
846 : /*
847 : * SFUSE_STRAP is supposed to have a bit signalling the display
848 : * is fused off. Unfortunately it seems that, at least in
849 : * certain cases, fused off display means that PCH display
850 : * reads don't land anywhere. In that case, we read 0s.
851 : *
852 : * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
853 : * should be set when taking over after the firmware.
854 : */
855 0 : if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
856 0 : sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
857 0 : (dev_priv->pch_type == PCH_CPT &&
858 0 : !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
859 : DRM_INFO("Display fused off, disabling\n");
860 0 : info->num_pipes = 0;
861 0 : }
862 0 : } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
863 0 : u32 dfsm = I915_READ(SKL_DFSM);
864 : u8 disabled_mask = 0;
865 : bool invalid;
866 : int num_bits;
867 :
868 0 : if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
869 0 : disabled_mask |= BIT(PIPE_A);
870 0 : if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
871 0 : disabled_mask |= BIT(PIPE_B);
872 0 : if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
873 0 : disabled_mask |= BIT(PIPE_C);
874 :
875 0 : num_bits = hweight8(disabled_mask);
876 :
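		/*
		 * The only fuse patterns accepted below are "suffixes":
		 * pipes may only be fused off from the last pipe downwards
		 * (e.g. C alone, or B and C), never with an enabled pipe
		 * after a disabled one.
		 */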
877 0 : switch (disabled_mask) {
878 : case BIT(PIPE_A):
879 : case BIT(PIPE_B):
880 : case BIT(PIPE_A) | BIT(PIPE_B):
881 : case BIT(PIPE_A) | BIT(PIPE_C):
882 : invalid = true;
883 0 : break;
884 : default:
885 : invalid = false;
886 0 : }
887 :
888 0 : if (num_bits > info->num_pipes || invalid)
889 0 : DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
890 : disabled_mask);
891 : else
892 0 : info->num_pipes -= num_bits;
893 0 : }
894 :
895 : /* Initialize slice/subslice/EU info */
896 0 : if (IS_CHERRYVIEW(dev))
897 0 : cherryview_sseu_info_init(dev);
898 0 : else if (IS_BROADWELL(dev))
899 0 : broadwell_sseu_info_init(dev);
900 0 : else if (INTEL_INFO(dev)->gen >= 9)
901 0 : gen9_sseu_info_init(dev);
902 :
903 : DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
904 : DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
905 : DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
906 : DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
907 : DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
908 : DRM_DEBUG_DRIVER("has slice power gating: %s\n",
909 : info->has_slice_pg ? "y" : "n");
910 : DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
911 : info->has_subslice_pg ? "y" : "n");
912 : DRM_DEBUG_DRIVER("has EU power gating: %s\n",
913 : info->has_eu_pg ? "y" : "n");
914 0 : }
915 :
916 0 : static void intel_init_dpio(struct drm_i915_private *dev_priv)
917 : {
918 0 : if (!IS_VALLEYVIEW(dev_priv))
919 : return;
920 :
921 : /*
922 : * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
923 : * CHV x1 PHY (DP/HDMI D)
924 : * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
925 : */
926 0 : if (IS_CHERRYVIEW(dev_priv)) {
927 0 : DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
928 0 : DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
929 0 : } else {
930 0 : DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
931 : }
932 0 : }
933 :
934 : #ifdef __linux__
935 : /**
936 : * i915_driver_load - setup chip and create an initial config
937 : * @dev: DRM device
938 : * @flags: startup flags
939 : *
940 : * The driver load routine has to do several things:
941 : * - drive output discovery via intel_modeset_init()
942 : * - initialize the memory manager
943 : * - allocate initial config memory
944 : * - setup the DRM framebuffer with the allocated memory
945 : */
946 : int i915_driver_load(struct drm_device *dev, unsigned long flags)
947 : {
948 : struct drm_i915_private *dev_priv;
949 : struct intel_device_info *info, *device_info;
950 : int ret = 0, mmio_bar, mmio_size;
951 : uint32_t aperture_size;
952 :
953 : info = (struct intel_device_info *) flags;
954 :
955 : dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
956 : if (dev_priv == NULL)
957 : return -ENOMEM;
958 :
959 : dev->dev_private = dev_priv;
960 : dev_priv->dev = dev;
961 :
962 : /* Setup the write-once "constant" device info */
963 : device_info = (struct intel_device_info *)&dev_priv->info;
964 : memcpy(device_info, info, sizeof(dev_priv->info));
965 : device_info->device_id = dev->pdev->device;
966 :
967 : spin_lock_init(&dev_priv->irq_lock);
968 : spin_lock_init(&dev_priv->gpu_error.lock);
969 : mutex_init(&dev_priv->backlight_lock);
970 : spin_lock_init(&dev_priv->uncore.lock);
971 : spin_lock_init(&dev_priv->mm.object_stat_lock);
972 : spin_lock_init(&dev_priv->mmio_flip_lock);
973 : mutex_init(&dev_priv->sb_lock);
974 : mutex_init(&dev_priv->modeset_restore_lock);
975 : mutex_init(&dev_priv->csr_lock);
976 : mutex_init(&dev_priv->av_mutex);
977 :
978 : intel_pm_setup(dev);
979 :
980 : intel_display_crc_init(dev);
981 :
982 : i915_dump_device_info(dev_priv);
983 :
984 : /* Not all pre-production machines fall into this category, only the
985 : * very first ones. Almost everything should work, except for maybe
986 : * suspend/resume. And we don't implement workarounds that affect only
987 : * pre-production machines. */
988 : if (IS_HSW_EARLY_SDV(dev))
989 : DRM_INFO("This is an early pre-production Haswell machine. "
990 : "It may not be fully functional.\n");
991 :
992 : if (i915_get_bridge_dev(dev)) {
993 : ret = -EIO;
994 : goto free_priv;
995 : }
996 :
997 : mmio_bar = IS_GEN2(dev) ? 1 : 0;
998 : /* Before gen4, the registers and the GTT are behind different BARs.
999 : * However, from gen4 onwards, the registers and the GTT are shared
1000 : * in the same BAR, so we want to restrict this ioremap from
1001 : * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1002 : * the register BAR remains the same size for all the earlier
1003 : * generations up to Ironlake.
1004 : */
1005 : if (info->gen < 5)
1006 : mmio_size = 512*1024;
1007 : else
1008 : mmio_size = 2*1024*1024;
1009 :
1010 : dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1011 : if (!dev_priv->regs) {
1012 : DRM_ERROR("failed to map registers\n");
1013 : ret = -EIO;
1014 : goto put_bridge;
1015 : }
1016 :
1017 : /* This must be called before any calls to HAS_PCH_* */
1018 : intel_detect_pch(dev);
1019 :
1020 : intel_uncore_init(dev);
1021 :
1022 : /* Load CSR Firmware for SKL */
1023 : intel_csr_ucode_init(dev);
1024 :
1025 : ret = i915_gem_gtt_init(dev);
1026 : if (ret)
1027 : goto out_freecsr;
1028 :
1029 : /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1030 : * otherwise the vga fbdev driver falls over. */
1031 : ret = i915_kick_out_firmware_fb(dev_priv);
1032 : if (ret) {
1033 : DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1034 : goto out_gtt;
1035 : }
1036 :
1037 : ret = i915_kick_out_vgacon(dev_priv);
1038 : if (ret) {
1039 : DRM_ERROR("failed to remove conflicting VGA console\n");
1040 : goto out_gtt;
1041 : }
1042 :
1043 : pci_set_master(dev->pdev);
1044 :
1045 : /* overlay on gen2 is broken and can't address above 1G */
1046 : if (IS_GEN2(dev))
1047 : dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1048 :
1049 : /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1050 : * using 32bit addressing, overwriting memory if HWS is located
1051 : * above 4GB.
1052 : *
1053 : * The documentation also mentions an issue with undefined
1054 : * behaviour if any general state is accessed within a page above 4GB,
1055 : * which also needs to be handled carefully.
1056 : */
1057 : if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1058 : dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1059 :
1060 : aperture_size = dev_priv->gtt.mappable_end;
1061 :
1062 : dev_priv->gtt.mappable =
1063 : io_mapping_create_wc(dev_priv->gtt.mappable_base,
1064 : aperture_size);
1065 : if (dev_priv->gtt.mappable == NULL) {
1066 : ret = -EIO;
1067 : goto out_gtt;
1068 : }
1069 :
1070 : dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1071 : aperture_size);
1072 :
1073 : /* The i915 workqueue is primarily used for batched retirement of
1074 : * requests (and thus managing bo) once the task has been completed
1075 : * by the GPU. i915_gem_retire_requests() is called directly when we
1076 : * need high-priority retirement, such as waiting for an explicit
1077 : * bo.
1078 : *
1079 : * It is also used for periodic low-priority events, such as
1080 : * idle-timers and recording error state.
1081 : *
1082 : * All tasks on the workqueue are expected to acquire the dev mutex
1083 : * so there is no point in running more than one instance of the
1084 : * workqueue at any time. Use an ordered one.
1085 : */
1086 : dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1087 : if (dev_priv->wq == NULL) {
1088 : DRM_ERROR("Failed to create our workqueue.\n");
1089 : ret = -ENOMEM;
1090 : goto out_mtrrfree;
1091 : }
1092 :
1093 : dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
1094 : if (dev_priv->hotplug.dp_wq == NULL) {
1095 : DRM_ERROR("Failed to create our dp workqueue.\n");
1096 : ret = -ENOMEM;
1097 : goto out_freewq;
1098 : }
1099 :
1100 : dev_priv->gpu_error.hangcheck_wq =
1101 : alloc_ordered_workqueue("i915-hangcheck", 0);
1102 : if (dev_priv->gpu_error.hangcheck_wq == NULL) {
1103 : DRM_ERROR("Failed to create our hangcheck workqueue.\n");
1104 : ret = -ENOMEM;
1105 : goto out_freedpwq;
1106 : }
1107 :
1108 : intel_irq_init(dev_priv);
1109 : intel_uncore_sanitize(dev);
1110 :
1111 : /* Try to make sure MCHBAR is enabled before poking at it */
1112 : intel_setup_mchbar(dev);
1113 : intel_opregion_setup(dev);
1114 :
1115 : i915_gem_load(dev);
1116 :
1117 : /* On the 945G/GM, the chipset reports the MSI capability on the
1118 : * integrated graphics even though the support isn't actually there
1119 : * according to the published specs. It doesn't appear to function
1120 : * correctly in testing on 945G.
1121 : * This may be a side effect of MSI having been made available for PEG
1122 : * and the registers being closely associated.
1123 : *
1124 : * According to chipset errata, on the 965GM, MSI interrupts may
1125 : * be lost or delayed, but we use them anyway to avoid
1126 : * stuck interrupts on some machines.
1127 : */
1128 : if (!IS_I945G(dev) && !IS_I945GM(dev))
1129 : pci_enable_msi(dev->pdev);
1130 :
1131 : intel_device_info_runtime_init(dev);
1132 :
1133 : intel_init_dpio(dev_priv);
1134 :
1135 : if (INTEL_INFO(dev)->num_pipes) {
1136 : ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1137 : if (ret)
1138 : goto out_gem_unload;
1139 : }
1140 :
1141 : intel_power_domains_init(dev_priv);
1142 :
1143 : ret = i915_load_modeset_init(dev);
1144 : if (ret < 0) {
1145 : DRM_ERROR("failed to init modeset\n");
1146 : goto out_power_well;
1147 : }
1148 :
1149 : /*
1150 : * Notify a valid surface after modesetting,
1151 : * when running inside a VM.
1152 : */
1153 : if (intel_vgpu_active(dev))
1154 : I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1155 :
1156 : i915_setup_sysfs(dev);
1157 :
1158 : if (INTEL_INFO(dev)->num_pipes) {
1159 : /* Must be done after probing outputs */
1160 : intel_opregion_init(dev);
1161 : acpi_video_register();
1162 : }
1163 :
1164 : if (IS_GEN5(dev))
1165 : intel_gpu_ips_init(dev_priv);
1166 :
1167 : intel_runtime_pm_enable(dev_priv);
1168 :
1169 : i915_audio_component_init(dev_priv);
1170 :
1171 : return 0;
1172 :
1173 : out_power_well:
1174 : intel_power_domains_fini(dev_priv);
1175 : drm_vblank_cleanup(dev);
1176 : out_gem_unload:
1177 : WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1178 : unregister_shrinker(&dev_priv->mm.shrinker);
1179 :
1180 : if (dev->pdev->msi_enabled)
1181 : pci_disable_msi(dev->pdev);
1182 :
1183 : intel_teardown_mchbar(dev);
1184 : pm_qos_remove_request(&dev_priv->pm_qos);
1185 : destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
1186 : out_freedpwq:
1187 : destroy_workqueue(dev_priv->hotplug.dp_wq);
1188 : out_freewq:
1189 : destroy_workqueue(dev_priv->wq);
1190 : out_mtrrfree:
1191 : arch_phys_wc_del(dev_priv->gtt.mtrr);
1192 : io_mapping_free(dev_priv->gtt.mappable);
1193 : out_gtt:
1194 : i915_global_gtt_cleanup(dev);
1195 : out_freecsr:
1196 : intel_csr_ucode_fini(dev);
1197 : intel_uncore_fini(dev);
1198 : pci_iounmap(dev->pdev, dev_priv->regs);
1199 : put_bridge:
1200 : pci_dev_put(dev_priv->bridge_dev);
1201 : free_priv:
1202 : kmem_cache_destroy(dev_priv->requests);
1203 : kmem_cache_destroy(dev_priv->vmas);
1204 : kmem_cache_destroy(dev_priv->objects);
1205 : kfree(dev_priv);
1206 : return ret;
1207 : }
1208 :
1209 : int i915_driver_unload(struct drm_device *dev)
1210 : {
1211 : struct drm_i915_private *dev_priv = dev->dev_private;
1212 : int ret;
1213 :
1214 : i915_audio_component_cleanup(dev_priv);
1215 :
1216 : ret = i915_gem_suspend(dev);
1217 : if (ret) {
1218 : DRM_ERROR("failed to idle hardware: %d\n", ret);
1219 : return ret;
1220 : }
1221 :
1222 : intel_power_domains_fini(dev_priv);
1223 :
1224 : intel_gpu_ips_teardown();
1225 :
1226 : i915_teardown_sysfs(dev);
1227 :
1228 : WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1229 : unregister_shrinker(&dev_priv->mm.shrinker);
1230 :
1231 : io_mapping_free(dev_priv->gtt.mappable);
1232 : arch_phys_wc_del(dev_priv->gtt.mtrr);
1233 :
1234 : acpi_video_unregister();
1235 :
1236 : intel_fbdev_fini(dev);
1237 :
1238 : drm_vblank_cleanup(dev);
1239 :
1240 : intel_modeset_cleanup(dev);
1241 :
1242 : /*
1243 : * free the memory space allocated for the child device
1244 : * config parsed from VBT
1245 : */
1246 : if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1247 : kfree(dev_priv->vbt.child_dev);
1248 : dev_priv->vbt.child_dev = NULL;
1249 : dev_priv->vbt.child_dev_num = 0;
1250 : }
1251 : kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1252 : dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1253 : kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1254 : dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1255 :
1256 : vga_switcheroo_unregister_client(dev->pdev);
1257 : vga_client_register(dev->pdev, NULL, NULL, NULL);
1258 :
1259 : /* Free error state after interrupts are fully disabled. */
1260 : cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1261 : i915_destroy_error_state(dev);
1262 :
1263 : if (dev->pdev->msi_enabled)
1264 : pci_disable_msi(dev->pdev);
1265 :
1266 : intel_opregion_fini(dev);
1267 :
1268 : /* Flush any outstanding unpin_work. */
1269 : flush_workqueue(dev_priv->wq);
1270 :
1271 : intel_guc_ucode_fini(dev);
1272 : mutex_lock(&dev->struct_mutex);
1273 : i915_gem_cleanup_ringbuffer(dev);
1274 : i915_gem_context_fini(dev);
1275 : mutex_unlock(&dev->struct_mutex);
1276 : intel_fbc_cleanup_cfb(dev_priv);
1277 : i915_gem_cleanup_stolen(dev);
1278 :
1279 : intel_csr_ucode_fini(dev);
1280 :
1281 : intel_teardown_mchbar(dev);
1282 :
1283 : destroy_workqueue(dev_priv->hotplug.dp_wq);
1284 : destroy_workqueue(dev_priv->wq);
1285 : destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
1286 : pm_qos_remove_request(&dev_priv->pm_qos);
1287 :
1288 : i915_global_gtt_cleanup(dev);
1289 :
1290 : intel_uncore_fini(dev);
1291 : if (dev_priv->regs != NULL)
1292 : pci_iounmap(dev->pdev, dev_priv->regs);
1293 :
1294 : kmem_cache_destroy(dev_priv->requests);
1295 : kmem_cache_destroy(dev_priv->vmas);
1296 : kmem_cache_destroy(dev_priv->objects);
1297 : pci_dev_put(dev_priv->bridge_dev);
1298 : kfree(dev_priv);
1299 :
1300 : return 0;
1301 : }
1302 : #else
1303 0 : int i915_driver_load(struct drm_device *dev, unsigned long flags)
1304 : {
1305 : struct drm_i915_private *dev_priv;
1306 : struct intel_device_info *info, *device_info;
1307 : int ret = 0, mmio_bar, mmio_size;
1308 : uint32_t aperture_size;
1309 : int i;
1310 :
1311 0 : info = (struct intel_device_info *) flags;
1312 :
1313 : #ifdef __linux__
1314 : dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1315 : if (dev_priv == NULL)
1316 : return -ENOMEM;
1317 :
1318 : dev->dev_private = dev_priv;
1319 : dev_priv->dev = dev;
1320 : #else
1321 0 : dev_priv = dev->dev_private;
1322 : #endif
1323 :
1324 : /* Setup the write-once "constant" device info */
1325 0 : device_info = (struct intel_device_info *)&dev_priv->info;
1326 0 : memcpy(device_info, info, sizeof(dev_priv->info));
1327 0 : device_info->device_id = dev->pdev->device;
1328 :
1329 0 : mtx_init(&dev_priv->irq_lock, IPL_TTY);
1330 0 : mtx_init(&dev_priv->gpu_error.lock, IPL_TTY);
1331 0 : rw_init(&dev_priv->backlight_lock, "blight");
1332 0 : mtx_init(&dev_priv->uncore.lock, IPL_TTY);
1333 0 : mtx_init(&dev_priv->mm.object_stat_lock, IPL_TTY);
1334 0 : mtx_init(&dev_priv->mmio_flip_lock, IPL_TTY);
1335 0 : rw_init(&dev_priv->sb_lock, "sb");
1336 0 : rw_init(&dev_priv->modeset_restore_lock, "rest");
1337 0 : rw_init(&dev_priv->csr_lock, "csr");
1338 0 : rw_init(&dev_priv->av_mutex, "avm");
1339 :
1340 0 : intel_pm_setup(dev);
1341 :
1342 0 : intel_display_crc_init(dev);
1343 :
1344 : #ifdef __linux__
1345 : i915_dump_device_info(dev_priv);
1346 : #endif
1347 :
1348 : /* Not all pre-production machines fall into this category, only the
1349 : * very first ones. Almost everything should work, except for maybe
1350 : * suspend/resume. And we don't implement workarounds that affect only
1351 : * pre-production machines. */
1352 0 : if (IS_HSW_EARLY_SDV(dev))
1353 : DRM_INFO("This is an early pre-production Haswell machine. "
1354 : "It may not be fully functional.\n");
1355 :
1356 0 : if (i915_get_bridge_dev(dev)) {
1357 : ret = -EIO;
1358 0 : goto free_priv;
1359 : }
1360 :
1361 0 : mmio_bar = IS_GEN2(dev) ? 1 : 0;
1362 : /* Before gen4, the registers and the GTT are behind different BARs.
1363 : * However, from gen4 onwards, the registers and the GTT are shared
1364 : * in the same BAR, so we want to restrict this ioremap from
1365 : * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1366 : * the register BAR remains the same size for all the earlier
1367 : * generations up to Ironlake.
1368 : */
1369 0 : if (info->gen < 5)
1370 0 : mmio_size = 512*1024;
1371 : else
1372 : mmio_size = 2*1024*1024;
1373 :
1374 : #ifdef __linux__
1375 : dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1376 : if (!dev_priv->regs) {
1377 : DRM_ERROR("failed to map registers\n");
1378 : ret = -EIO;
1379 : goto put_bridge;
1380 : }
1381 : #endif
1382 :
1383 : /* This must be called before any calls to HAS_PCH_* */
1384 0 : intel_detect_pch(dev);
1385 :
1386 0 : intel_uncore_init(dev);
1387 :
1388 : /* Load CSR Firmware for SKL */
1389 0 : intel_csr_ucode_init(dev);
1390 :
1391 0 : ret = i915_gem_gtt_init(dev);
1392 0 : if (ret)
1393 : goto out_freecsr;
1394 :
1395 : /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1396 : * otherwise the vga fbdev driver falls over. */
1397 0 : ret = i915_kick_out_firmware_fb(dev_priv);
1398 0 : if (ret) {
1399 0 : DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1400 0 : goto out_gtt;
1401 : }
1402 :
1403 0 : ret = i915_kick_out_vgacon(dev_priv);
1404 0 : if (ret) {
1405 0 : DRM_ERROR("failed to remove conflicting VGA console\n");
1406 0 : goto out_gtt;
1407 : }
1408 :
1409 : pci_set_master(dev->pdev);
1410 :
1411 : /* overlay on gen2 is broken and can't address above 1G */
1412 0 : if (IS_GEN2(dev))
1413 : dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1414 :
1415 : /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1416 : * using 32bit addressing, overwriting memory if HWS is located
1417 : * above 4GB.
1418 : *
1419 : * The documentation also mentions an issue with undefined
1420 : * behaviour if any general state is accessed within a page above 4GB,
1421 : * which also needs to be handled carefully.
1422 : */
1423 0 : if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1424 : dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1425 :
1426 0 : aperture_size = dev_priv->gtt.mappable_end;
1427 :
1428 : #ifdef __linux__
1429 : dev_priv->gtt.mappable =
1430 : io_mapping_create_wc(dev_priv->gtt.mappable_base,
1431 : aperture_size);
1432 : if (dev_priv->gtt.mappable == NULL) {
1433 : ret = -EIO;
1434 : goto out_gtt;
1435 : }
1436 :
1437 : dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1438 : aperture_size);
1439 : #else
1440 : /* XXX would be a lot nicer to get agp info before now */
1441 0 : uvm_page_physload(atop(dev_priv->gtt.mappable_base),
1442 0 : atop(dev_priv->gtt.mappable_base + aperture_size),
1443 : atop(dev_priv->gtt.mappable_base),
1444 : atop(dev_priv->gtt.mappable_base + aperture_size),
1445 : PHYSLOAD_DEVICE);
1446 : /* array of vm pages that physload introduced. */
1447 0 : dev_priv->pgs = PHYS_TO_VM_PAGE(dev_priv->gtt.mappable_base);
1448 0 : KASSERT(dev_priv->pgs != NULL);
1449 : /*
1450 : * XXX mark all pages write combining so user mmaps get the right
1451 : * bits. We really need a proper MI api for doing this, but for now
1452 : * this allows us to use PAT where available.
1453 : */
1454 0 : for (i = 0; i < atop(aperture_size); i++)
1455 0 : atomic_setbits_int(&(dev_priv->pgs[i].pg_flags), PG_PMAP_WC);
1456 0 : if (agp_init_map(dev_priv->bst, dev_priv->gtt.mappable_base,
1457 : aperture_size, BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
1458 0 : &dev_priv->agph))
1459 0 : panic("can't map aperture");
1460 : #endif
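
	/*
	 * On the OpenBSD side above, the aperture pages are introduced to
	 * the VM system with uvm_page_physload() and individually flagged
	 * PG_PMAP_WC, so later user mmaps of the aperture inherit
	 * write-combining via PAT where available.
	 */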
1461 :
1462 : /* The i915 workqueue is primarily used for batched retirement of
1463 : * requests (and thus managing bo) once the task has been completed
1464 : * by the GPU. i915_gem_retire_requests() is called directly when we
1465 : * need high-priority retirement, such as waiting for an explicit
1466 : * bo.
1467 : *
1468 : * It is also used for periodic low-priority events, such as
1469 : * idle-timers and recording error state.
1470 : *
1471 : * All tasks on the workqueue are expected to acquire the dev mutex
1472 : * so there is no point in running more than one instance of the
1473 : * workqueue at any time. Use an ordered one.
1474 : */
1475 0 : dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1476 0 : if (dev_priv->wq == NULL) {
1477 0 : DRM_ERROR("Failed to create our workqueue.\n");
1478 : ret = -ENOMEM;
1479 0 : goto out_mtrrfree;
1480 : }
1481 :
1482 0 : dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
1483 0 : if (dev_priv->hotplug.dp_wq == NULL) {
1484 0 : DRM_ERROR("Failed to create our dp workqueue.\n");
1485 : ret = -ENOMEM;
1486 0 : goto out_freewq;
1487 : }
1488 :
1489 0 : dev_priv->gpu_error.hangcheck_wq =
1490 0 : alloc_ordered_workqueue("i915-hangcheck", 0);
1491 0 : if (dev_priv->gpu_error.hangcheck_wq == NULL) {
1492 0 : DRM_ERROR("Failed to create our hangcheck workqueue.\n");
1493 : ret = -ENOMEM;
1494 0 : goto out_freedpwq;
1495 : }
1496 :
1497 0 : intel_irq_init(dev_priv);
1498 0 : intel_uncore_sanitize(dev);
1499 :
1500 : /* Try to make sure MCHBAR is enabled before poking at it */
1501 0 : intel_setup_mchbar(dev);
1502 0 : intel_opregion_setup(dev);
1503 :
1504 0 : i915_gem_load(dev);
1505 :
1506 : /* On the 945G/GM, the chipset reports the MSI capability on the
1507 : * integrated graphics even though the support isn't actually there
1508 : * according to the published specs. It doesn't appear to function
1509 : * correctly in testing on 945G.
1510 : * This may be a side effect of MSI having been made available for PEG
1511 : * and the registers being closely associated.
1512 : *
1513 : * According to chipset errata, on the 965GM, MSI interrupts may
1514 : * be lost or delayed, but we use them anyway to avoid
1515 : * stuck interrupts on some machines.
1516 : */
1517 0 : if (!IS_I945G(dev) && !IS_I945GM(dev))
1518 : pci_enable_msi(dev->pdev);
1519 :
1520 0 : intel_device_info_runtime_init(dev);
1521 :
1522 0 : intel_init_dpio(dev_priv);
1523 :
1524 0 : if (INTEL_INFO(dev)->num_pipes) {
1525 0 : ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1526 0 : if (ret)
1527 : goto out_gem_unload;
1528 : }
1529 :
1530 0 : intel_power_domains_init(dev_priv);
1531 :
1532 0 : ret = i915_load_modeset_init(dev);
1533 0 : if (ret < 0) {
1534 0 : DRM_ERROR("failed to init modeset\n");
1535 : goto out_power_well;
1536 : }
1537 :
1538 : /*
1539 : * Notify a valid surface after modesetting,
1540 : * when running inside a VM.
1541 : */
1542 0 : if (intel_vgpu_active(dev))
1543 0 : I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1544 :
1545 : #ifdef __linux__
1546 : i915_setup_sysfs(dev);
1547 : #endif
1548 :
1549 0 : if (INTEL_INFO(dev)->num_pipes) {
1550 : /* Must be done after probing outputs */
1551 0 : intel_opregion_init(dev);
1552 : acpi_video_register();
1553 0 : }
1554 :
1555 0 : if (IS_GEN5(dev))
1556 0 : intel_gpu_ips_init(dev_priv);
1557 :
1558 0 : intel_runtime_pm_enable(dev_priv);
1559 :
1560 : #ifdef notyet
1561 : i915_audio_component_init(dev_priv);
1562 : #endif
1563 :
1564 0 : return 0;
1565 :
1566 : out_power_well:
1567 0 : intel_power_domains_fini(dev_priv);
1568 0 : drm_vblank_cleanup(dev);
1569 : out_gem_unload:
1570 : #ifdef __linux__
1571 : WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
1572 : unregister_shrinker(&dev_priv->mm.shrinker);
1573 : #endif
1574 :
1575 0 : if (dev->pdev->msi_enabled)
1576 : pci_disable_msi(dev->pdev);
1577 :
1578 0 : intel_teardown_mchbar(dev);
1579 : pm_qos_remove_request(&dev_priv->pm_qos);
1580 0 : destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
1581 : out_freedpwq:
1582 0 : destroy_workqueue(dev_priv->hotplug.dp_wq);
1583 : out_freewq:
1584 0 : destroy_workqueue(dev_priv->wq);
1585 : out_mtrrfree:
1586 : #ifdef __linux__
1587 : arch_phys_wc_del(dev_priv->gtt.mtrr);
1588 : io_mapping_free(dev_priv->gtt.mappable);
1589 : #endif
1590 : out_gtt:
1591 0 : i915_global_gtt_cleanup(dev);
1592 : out_freecsr:
1593 0 : intel_csr_ucode_fini(dev);
1594 0 : intel_uncore_fini(dev);
1595 : #ifdef __linux__
1596 : pci_iounmap(dev->pdev, dev_priv->regs);
1597 : put_bridge:
1598 : #endif
1599 : pci_dev_put(dev_priv->bridge_dev);
1600 : free_priv:
1601 : #ifdef __linux__
1602 : kmem_cache_destroy(dev_priv->requests);
1603 : kmem_cache_destroy(dev_priv->vmas);
1604 : kmem_cache_destroy(dev_priv->objects);
1605 : kfree(dev_priv);
1606 : #endif
1607 0 : return ret;
1608 0 : }
1609 :
1610 : #endif
1611 :
1612 0 : int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1613 : {
1614 : int ret;
1615 :
1616 0 : ret = i915_gem_open(dev, file);
1617 0 : if (ret)
1618 0 : return ret;
1619 :
1620 0 : return 0;
1621 0 : }
1622 :
1623 : /**
1624 : * i915_driver_lastclose - clean up after all DRM clients have exited
1625 : * @dev: DRM device
1626 : *
1627 : * Take care of cleaning up after all DRM clients have exited. In the
1628 : * mode setting case, we want to restore the kernel's initial mode (just
1629 : * in case the last client left us in a bad state).
1630 : *
1631 : * Additionally, in the non-mode setting case, we'll tear down the GTT
1632 : * and DMA structures, since the kernel won't be using them, and clean
1633 : * up any GEM state.
1634 : */
1635 0 : void i915_driver_lastclose(struct drm_device *dev)
1636 : {
1637 0 : intel_fbdev_restore_mode(dev);
1638 : vga_switcheroo_process_delayed_switch();
1639 0 : }
1640 :
1641 0 : void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1642 : {
1643 0 : mutex_lock(&dev->struct_mutex);
1644 0 : i915_gem_context_close(dev, file);
1645 0 : i915_gem_release(dev, file);
1646 0 : mutex_unlock(&dev->struct_mutex);
1647 :
1648 0 : intel_modeset_preclose(dev, file);
1649 0 : }
1650 :
1651 0 : void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1652 : {
1653 0 : struct drm_i915_file_private *file_priv = file->driver_priv;
1654 :
1655 0 : if (file_priv && file_priv->bsd_ring)
1656 0 : file_priv->bsd_ring = NULL;
1657 0 : kfree(file_priv);
1658 0 : }
1659 :
1660 : static int
1661 0 : i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
1662 : struct drm_file *file)
1663 : {
1664 0 : return -ENODEV;
1665 : }
1666 :
1667 : const struct drm_ioctl_desc i915_ioctls[] = {
1668 : DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1669 : DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1670 : DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1671 : DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1672 : DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1673 : DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1674 : DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1675 : DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1676 : DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1677 : DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1678 : DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1679 : DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1680 : DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1681 : DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1682 : DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1683 : DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1684 : DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1685 : DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1686 : #ifdef __linux__
1687 : DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1688 : #endif
1689 : DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
1690 : DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1691 : DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1692 : DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1693 : DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1694 : DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1695 : DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1696 : DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1697 : DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1698 : DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1699 : DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1700 : DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1701 : DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1702 : DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
1703 : DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1704 : DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1705 : DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
1706 : DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
1707 : DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1708 : DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1709 : DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1710 : DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
1711 : DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
1712 : DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1713 : DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1714 : DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1715 : DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1716 : DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1717 : DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1718 : DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
1719 : DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1720 : DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1721 : DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1722 : };
1723 :
1724 : int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
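
/*
 * Illustrative sketch (not part of this file): the DRM core dispatches
 * driver-private ioctls by indexing the table above, roughly:
 *
 *	unsigned int nr = DRM_IOCTL_NR(cmd) - DRM_COMMAND_BASE;
 *	if (nr < i915_max_ioctl)
 *		ret = i915_ioctls[nr].func(dev, data, file_priv);
 */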