Line data Source code
1 : /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 : */
3 : /*
4 : *
5 : * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 : * All Rights Reserved.
7 : *
8 : * Permission is hereby granted, free of charge, to any person obtaining a
9 : * copy of this software and associated documentation files (the
10 : * "Software"), to deal in the Software without restriction, including
11 : * without limitation the rights to use, copy, modify, merge, publish,
12 : * distribute, sub license, and/or sell copies of the Software, and to
13 : * permit persons to whom the Software is furnished to do so, subject to
14 : * the following conditions:
15 : *
16 : * The above copyright notice and this permission notice (including the
17 : * next paragraph) shall be included in all copies or substantial portions
18 : * of the Software.
19 : *
20 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 : * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 : * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 : * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 : * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 : * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 : * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 : *
28 : */
29 :
30 : #ifdef __linux__
31 : #include <linux/device.h>
32 : #include <linux/acpi.h>
33 : #endif
34 : #include <dev/pci/drm/drmP.h>
35 : #include <dev/pci/drm/i915_drm.h>
36 : #include <dev/pci/drm/i915_pciids.h>
37 : #include "i915_drv.h"
38 : #include "i915_trace.h"
39 : #include "intel_drv.h"
40 :
41 : #ifdef __linux__
42 : #include <linux/console.h>
43 : #include <linux/module.h>
44 : #include <linux/pm_runtime.h>
45 : #endif
46 : #include <dev/pci/drm/drm_crtc_helper.h>
47 :
48 : static struct drm_driver driver;
49 :
50 : #define GEN_DEFAULT_PIPEOFFSETS \
51 : .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
52 : PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
53 : .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
54 : TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
55 : .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
56 :
57 : #define GEN_CHV_PIPEOFFSETS \
58 : .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
59 : CHV_PIPE_C_OFFSET }, \
60 : .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
61 : CHV_TRANSCODER_C_OFFSET, }, \
62 : .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
63 : CHV_PALETTE_C_OFFSET }
64 :
65 : #define CURSOR_OFFSETS \
66 : .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
67 :
68 : #define IVB_CURSOR_OFFSETS \
69 : .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
70 :
71 : static const struct intel_device_info intel_i830_info = {
72 : .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
73 : .has_overlay = 1, .overlay_needs_physical = 1,
74 : .ring_mask = RENDER_RING,
75 : GEN_DEFAULT_PIPEOFFSETS,
76 : CURSOR_OFFSETS,
77 : };
78 :
79 : static const struct intel_device_info intel_845g_info = {
80 : .gen = 2, .num_pipes = 1,
81 : .has_overlay = 1, .overlay_needs_physical = 1,
82 : .ring_mask = RENDER_RING,
83 : GEN_DEFAULT_PIPEOFFSETS,
84 : CURSOR_OFFSETS,
85 : };
86 :
87 : static const struct intel_device_info intel_i85x_info = {
88 : .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
89 : .cursor_needs_physical = 1,
90 : .has_overlay = 1, .overlay_needs_physical = 1,
91 : .has_fbc = 1,
92 : .ring_mask = RENDER_RING,
93 : GEN_DEFAULT_PIPEOFFSETS,
94 : CURSOR_OFFSETS,
95 : };
96 :
97 : static const struct intel_device_info intel_i865g_info = {
98 : .gen = 2, .num_pipes = 1,
99 : .has_overlay = 1, .overlay_needs_physical = 1,
100 : .ring_mask = RENDER_RING,
101 : GEN_DEFAULT_PIPEOFFSETS,
102 : CURSOR_OFFSETS,
103 : };
104 :
105 : static const struct intel_device_info intel_i915g_info = {
106 : .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
107 : .has_overlay = 1, .overlay_needs_physical = 1,
108 : .ring_mask = RENDER_RING,
109 : GEN_DEFAULT_PIPEOFFSETS,
110 : CURSOR_OFFSETS,
111 : };
112 : static const struct intel_device_info intel_i915gm_info = {
113 : .gen = 3, .is_mobile = 1, .num_pipes = 2,
114 : .cursor_needs_physical = 1,
115 : .has_overlay = 1, .overlay_needs_physical = 1,
116 : .supports_tv = 1,
117 : .has_fbc = 1,
118 : .ring_mask = RENDER_RING,
119 : GEN_DEFAULT_PIPEOFFSETS,
120 : CURSOR_OFFSETS,
121 : };
122 : static const struct intel_device_info intel_i945g_info = {
123 : .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
124 : .has_overlay = 1, .overlay_needs_physical = 1,
125 : .ring_mask = RENDER_RING,
126 : GEN_DEFAULT_PIPEOFFSETS,
127 : CURSOR_OFFSETS,
128 : };
129 : static const struct intel_device_info intel_i945gm_info = {
130 : .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
131 : .has_hotplug = 1, .cursor_needs_physical = 1,
132 : .has_overlay = 1, .overlay_needs_physical = 1,
133 : .supports_tv = 1,
134 : .has_fbc = 1,
135 : .ring_mask = RENDER_RING,
136 : GEN_DEFAULT_PIPEOFFSETS,
137 : CURSOR_OFFSETS,
138 : };
139 :
140 : static const struct intel_device_info intel_i965g_info = {
141 : .gen = 4, .is_broadwater = 1, .num_pipes = 2,
142 : .has_hotplug = 1,
143 : .has_overlay = 1,
144 : .ring_mask = RENDER_RING,
145 : GEN_DEFAULT_PIPEOFFSETS,
146 : CURSOR_OFFSETS,
147 : };
148 :
149 : static const struct intel_device_info intel_i965gm_info = {
150 : .gen = 4, .is_crestline = 1, .num_pipes = 2,
151 : .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
152 : .has_overlay = 1,
153 : .supports_tv = 1,
154 : .ring_mask = RENDER_RING,
155 : GEN_DEFAULT_PIPEOFFSETS,
156 : CURSOR_OFFSETS,
157 : };
158 :
159 : static const struct intel_device_info intel_g33_info = {
160 : .gen = 3, .is_g33 = 1, .num_pipes = 2,
161 : .need_gfx_hws = 1, .has_hotplug = 1,
162 : .has_overlay = 1,
163 : .ring_mask = RENDER_RING,
164 : GEN_DEFAULT_PIPEOFFSETS,
165 : CURSOR_OFFSETS,
166 : };
167 :
168 : static const struct intel_device_info intel_g45_info = {
169 : .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
170 : .has_pipe_cxsr = 1, .has_hotplug = 1,
171 : .ring_mask = RENDER_RING | BSD_RING,
172 : GEN_DEFAULT_PIPEOFFSETS,
173 : CURSOR_OFFSETS,
174 : };
175 :
176 : static const struct intel_device_info intel_gm45_info = {
177 : .gen = 4, .is_g4x = 1, .num_pipes = 2,
178 : .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
179 : .has_pipe_cxsr = 1, .has_hotplug = 1,
180 : .supports_tv = 1,
181 : .ring_mask = RENDER_RING | BSD_RING,
182 : GEN_DEFAULT_PIPEOFFSETS,
183 : CURSOR_OFFSETS,
184 : };
185 :
186 : static const struct intel_device_info intel_pineview_info = {
187 : .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
188 : .need_gfx_hws = 1, .has_hotplug = 1,
189 : .has_overlay = 1,
190 : GEN_DEFAULT_PIPEOFFSETS,
191 : CURSOR_OFFSETS,
192 : };
193 :
194 : static const struct intel_device_info intel_ironlake_d_info = {
195 : .gen = 5, .num_pipes = 2,
196 : .need_gfx_hws = 1, .has_hotplug = 1,
197 : .ring_mask = RENDER_RING | BSD_RING,
198 : GEN_DEFAULT_PIPEOFFSETS,
199 : CURSOR_OFFSETS,
200 : };
201 :
202 : static const struct intel_device_info intel_ironlake_m_info = {
203 : .gen = 5, .is_mobile = 1, .num_pipes = 2,
204 : .need_gfx_hws = 1, .has_hotplug = 1,
205 : .has_fbc = 1,
206 : .ring_mask = RENDER_RING | BSD_RING,
207 : GEN_DEFAULT_PIPEOFFSETS,
208 : CURSOR_OFFSETS,
209 : };
210 :
211 : static const struct intel_device_info intel_sandybridge_d_info = {
212 : .gen = 6, .num_pipes = 2,
213 : .need_gfx_hws = 1, .has_hotplug = 1,
214 : .has_fbc = 1,
215 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
216 : .has_llc = 1,
217 : GEN_DEFAULT_PIPEOFFSETS,
218 : CURSOR_OFFSETS,
219 : };
220 :
221 : static const struct intel_device_info intel_sandybridge_m_info = {
222 : .gen = 6, .is_mobile = 1, .num_pipes = 2,
223 : .need_gfx_hws = 1, .has_hotplug = 1,
224 : .has_fbc = 1,
225 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
226 : .has_llc = 1,
227 : GEN_DEFAULT_PIPEOFFSETS,
228 : CURSOR_OFFSETS,
229 : };
230 :
231 : #define GEN7_FEATURES \
232 : .gen = 7, .num_pipes = 3, \
233 : .need_gfx_hws = 1, .has_hotplug = 1, \
234 : .has_fbc = 1, \
235 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
236 : .has_llc = 1
237 :
238 : static const struct intel_device_info intel_ivybridge_d_info = {
239 : GEN7_FEATURES,
240 : .is_ivybridge = 1,
241 : GEN_DEFAULT_PIPEOFFSETS,
242 : IVB_CURSOR_OFFSETS,
243 : };
244 :
245 : static const struct intel_device_info intel_ivybridge_m_info = {
246 : GEN7_FEATURES,
247 : .is_ivybridge = 1,
248 : .is_mobile = 1,
249 : GEN_DEFAULT_PIPEOFFSETS,
250 : IVB_CURSOR_OFFSETS,
251 : };
252 :
253 : static const struct intel_device_info intel_ivybridge_q_info = {
254 : GEN7_FEATURES,
255 : .is_ivybridge = 1,
256 : .num_pipes = 0, /* legal, last one wins */
257 : GEN_DEFAULT_PIPEOFFSETS,
258 : IVB_CURSOR_OFFSETS,
259 : };
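The "legal, last one wins" annotations in the Q and Valleyview entries rely on a C99 guarantee: when an initializer list designates the same member more than once, the last designation takes effect. A minimal sketch with a hypothetical struct and macro, not driver code:

struct demo_info { int num_pipes; int has_fbc; };

#define DEMO_DEFAULTS .num_pipes = 3, .has_fbc = 1

/* DEMO_DEFAULTS sets num_pipes to 3 first; the later designator
 * overrides it, so q.num_pipes ends up 0 while q.has_fbc stays 1. */
static const struct demo_info q = { DEMO_DEFAULTS, .num_pipes = 0 };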
260 :
261 : static const struct intel_device_info intel_valleyview_m_info = {
262 : GEN7_FEATURES,
263 : .is_mobile = 1,
264 : .num_pipes = 2,
265 : .is_valleyview = 1,
266 : .display_mmio_offset = VLV_DISPLAY_BASE,
267 : .has_fbc = 0, /* legal, last one wins */
268 : .has_llc = 0, /* legal, last one wins */
269 : GEN_DEFAULT_PIPEOFFSETS,
270 : CURSOR_OFFSETS,
271 : };
272 :
273 : static const struct intel_device_info intel_valleyview_d_info = {
274 : GEN7_FEATURES,
275 : .num_pipes = 2,
276 : .is_valleyview = 1,
277 : .display_mmio_offset = VLV_DISPLAY_BASE,
278 : .has_fbc = 0, /* legal, last one wins */
279 : .has_llc = 0, /* legal, last one wins */
280 : GEN_DEFAULT_PIPEOFFSETS,
281 : CURSOR_OFFSETS,
282 : };
283 :
284 : static const struct intel_device_info intel_haswell_d_info = {
285 : GEN7_FEATURES,
286 : .is_haswell = 1,
287 : .has_ddi = 1,
288 : .has_fpga_dbg = 1,
289 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
290 : GEN_DEFAULT_PIPEOFFSETS,
291 : IVB_CURSOR_OFFSETS,
292 : };
293 :
294 : static const struct intel_device_info intel_haswell_m_info = {
295 : GEN7_FEATURES,
296 : .is_haswell = 1,
297 : .is_mobile = 1,
298 : .has_ddi = 1,
299 : .has_fpga_dbg = 1,
300 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
301 : GEN_DEFAULT_PIPEOFFSETS,
302 : IVB_CURSOR_OFFSETS,
303 : };
304 :
305 : static const struct intel_device_info intel_broadwell_d_info = {
306 : .gen = 8, .num_pipes = 3,
307 : .need_gfx_hws = 1, .has_hotplug = 1,
308 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
309 : .has_llc = 1,
310 : .has_ddi = 1,
311 : .has_fpga_dbg = 1,
312 : .has_fbc = 1,
313 : GEN_DEFAULT_PIPEOFFSETS,
314 : IVB_CURSOR_OFFSETS,
315 : };
316 :
317 : static const struct intel_device_info intel_broadwell_m_info = {
318 : .gen = 8, .is_mobile = 1, .num_pipes = 3,
319 : .need_gfx_hws = 1, .has_hotplug = 1,
320 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
321 : .has_llc = 1,
322 : .has_ddi = 1,
323 : .has_fpga_dbg = 1,
324 : .has_fbc = 1,
325 : GEN_DEFAULT_PIPEOFFSETS,
326 : IVB_CURSOR_OFFSETS,
327 : };
328 :
329 : static const struct intel_device_info intel_broadwell_gt3d_info = {
330 : .gen = 8, .num_pipes = 3,
331 : .need_gfx_hws = 1, .has_hotplug = 1,
332 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
333 : .has_llc = 1,
334 : .has_ddi = 1,
335 : .has_fpga_dbg = 1,
336 : .has_fbc = 1,
337 : GEN_DEFAULT_PIPEOFFSETS,
338 : IVB_CURSOR_OFFSETS,
339 : };
340 :
341 : static const struct intel_device_info intel_broadwell_gt3m_info = {
342 : .gen = 8, .is_mobile = 1, .num_pipes = 3,
343 : .need_gfx_hws = 1, .has_hotplug = 1,
344 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
345 : .has_llc = 1,
346 : .has_ddi = 1,
347 : .has_fpga_dbg = 1,
348 : .has_fbc = 1,
349 : GEN_DEFAULT_PIPEOFFSETS,
350 : IVB_CURSOR_OFFSETS,
351 : };
352 :
353 : static const struct intel_device_info intel_cherryview_info = {
354 : .gen = 8, .num_pipes = 3,
355 : .need_gfx_hws = 1, .has_hotplug = 1,
356 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
357 : .is_valleyview = 1,
358 : .display_mmio_offset = VLV_DISPLAY_BASE,
359 : GEN_CHV_PIPEOFFSETS,
360 : CURSOR_OFFSETS,
361 : };
362 :
363 : static const struct intel_device_info intel_skylake_info = {
364 : .is_skylake = 1,
365 : .gen = 9, .num_pipes = 3,
366 : .need_gfx_hws = 1, .has_hotplug = 1,
367 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
368 : .has_llc = 1,
369 : .has_ddi = 1,
370 : .has_fpga_dbg = 1,
371 : .has_fbc = 1,
372 : GEN_DEFAULT_PIPEOFFSETS,
373 : IVB_CURSOR_OFFSETS,
374 : };
375 :
376 : static const struct intel_device_info intel_skylake_gt3_info = {
377 : .is_skylake = 1,
378 : .gen = 9, .num_pipes = 3,
379 : .need_gfx_hws = 1, .has_hotplug = 1,
380 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
381 : .has_llc = 1,
382 : .has_ddi = 1,
383 : .has_fpga_dbg = 1,
384 : .has_fbc = 1,
385 : GEN_DEFAULT_PIPEOFFSETS,
386 : IVB_CURSOR_OFFSETS,
387 : };
388 :
389 : static const struct intel_device_info intel_broxton_info = {
390 : .is_preliminary = 1,
391 : .is_broxton = 1,
392 : .gen = 9,
393 : .need_gfx_hws = 1, .has_hotplug = 1,
394 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
395 : .num_pipes = 3,
396 : .has_ddi = 1,
397 : .has_fpga_dbg = 1,
398 : .has_fbc = 1,
399 : GEN_DEFAULT_PIPEOFFSETS,
400 : IVB_CURSOR_OFFSETS,
401 : };
402 :
403 : static const struct intel_device_info intel_kabylake_info = {
404 : .is_kabylake = 1,
405 : .gen = 9,
406 : .num_pipes = 3,
407 : .need_gfx_hws = 1, .has_hotplug = 1,
408 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
409 : .has_llc = 1,
410 : .has_ddi = 1,
411 : .has_fpga_dbg = 1,
412 : .has_fbc = 1,
413 : GEN_DEFAULT_PIPEOFFSETS,
414 : IVB_CURSOR_OFFSETS,
415 : };
416 :
417 : static const struct intel_device_info intel_kabylake_gt3_info = {
418 : .is_kabylake = 1,
419 : .gen = 9,
420 : .num_pipes = 3,
421 : .need_gfx_hws = 1, .has_hotplug = 1,
422 : .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
423 : .has_llc = 1,
424 : .has_ddi = 1,
425 : .has_fpga_dbg = 1,
426 : .has_fbc = 1,
427 : GEN_DEFAULT_PIPEOFFSETS,
428 : IVB_CURSOR_OFFSETS,
429 : };
430 :
431 : /*
432 : * Make sure any device matches here go from most specific to most
433 : * general. For example, since the Quanta match is based on the subsystem
434 : * and subvendor IDs, we need it to come before the more general IVB
435 : * PCI ID matches, otherwise we'll use the wrong info struct above.
436 : */
437 : #define INTEL_PCI_IDS \
438 : INTEL_I830_IDS(&intel_i830_info), \
439 : INTEL_I845G_IDS(&intel_845g_info), \
440 : INTEL_I85X_IDS(&intel_i85x_info), \
441 : INTEL_I865G_IDS(&intel_i865g_info), \
442 : INTEL_I915G_IDS(&intel_i915g_info), \
443 : INTEL_I915GM_IDS(&intel_i915gm_info), \
444 : INTEL_I945G_IDS(&intel_i945g_info), \
445 : INTEL_I945GM_IDS(&intel_i945gm_info), \
446 : INTEL_I965G_IDS(&intel_i965g_info), \
447 : INTEL_G33_IDS(&intel_g33_info), \
448 : INTEL_I965GM_IDS(&intel_i965gm_info), \
449 : INTEL_GM45_IDS(&intel_gm45_info), \
450 : INTEL_G45_IDS(&intel_g45_info), \
451 : INTEL_PINEVIEW_IDS(&intel_pineview_info), \
452 : INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
453 : INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
454 : INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
455 : INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
456 : INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
457 : INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
458 : INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
459 : INTEL_HSW_D_IDS(&intel_haswell_d_info), \
460 : INTEL_HSW_M_IDS(&intel_haswell_m_info), \
461 : INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
462 : INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
463 : INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
464 : INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
465 : INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
466 : INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
467 : INTEL_CHV_IDS(&intel_cherryview_info), \
468 : INTEL_SKL_GT1_IDS(&intel_skylake_info), \
469 : INTEL_SKL_GT2_IDS(&intel_skylake_info), \
470 : INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
471 : INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), \
472 : INTEL_BXT_IDS(&intel_broxton_info), \
473 : INTEL_KBL_GT1_IDS(&intel_kabylake_info), \
474 : INTEL_KBL_GT2_IDS(&intel_kabylake_info), \
475 : INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), \
476 : INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info)
477 :
478 : static const struct drm_pcidev pciidlist[] = { /* aka */
479 : INTEL_PCI_IDS,
480 : {0, 0, 0}
481 : };
482 :
483 : MODULE_DEVICE_TABLE(pci, pciidlist);
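The ordering warning above exists because ID-table lookups stop at the first matching entry, so a specific match placed after a general one would never be reached. A hedged sketch of that first-match-wins scan; the entry type here is hypothetical, not the real struct drm_pcidev layout:

struct id_entry {
	unsigned short device;			/* 0 terminates the table */
	const struct intel_device_info *info;
};

static const struct intel_device_info *
lookup_device_info(const struct id_entry *ids, unsigned short device)
{
	for (; ids->device != 0; ids++)
		if (ids->device == device)
			return ids->info;	/* first match wins */
	return NULL;
}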
484 :
485 : #ifdef notyet
486 : static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
487 : {
488 : enum intel_pch ret = PCH_NOP;
489 :
490 : /*
491 : * In a virtualized passthrough environment we can be in a
492 : * setup where the ISA bridge is not able to be passed through.
493 : * In this case, a south bridge can be emulated and we have to
494 : * make an educated guess as to which PCH is really there.
495 : */
496 :
497 : if (IS_GEN5(dev)) {
498 : ret = PCH_IBX;
499 : DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
500 : } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
501 : ret = PCH_CPT;
502 : DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
503 : } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
504 : ret = PCH_LPT;
505 : DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
506 : } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
507 : ret = PCH_SPT;
508 : DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
509 : }
510 :
511 : return ret;
512 : }
513 : #endif
514 :
515 : static int
516 0 : intel_pch_match(struct pci_attach_args *pa)
517 : {
518 0 : if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
519 0 : PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
520 0 : PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA)
521 0 : return (1);
522 0 : return (0);
523 0 : }
524 :
525 0 : void intel_detect_pch(struct drm_device *dev)
526 : {
527 0 : struct drm_i915_private *dev_priv = dev->dev_private;
528 0 : struct pci_attach_args pa;
529 :
530 : /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
531 : * (which really amounts to a PCH but no South Display).
532 : */
533 0 : if (INTEL_INFO(dev)->num_pipes == 0) {
534 0 : dev_priv->pch_type = PCH_NOP;
535 0 : return;
536 : }
537 :
538 : /*
539 : * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
540 : * make graphics device passthrough easier for the VMM, which only
541 : * needs to expose the ISA bridge to let the driver know the real
542 : * hardware underneath. This is a requirement from the virtualization team.
543 : *
544 : * In some virtualized environments (e.g. XEN) there is an irrelevant
545 : * ISA bridge in the system. To work reliably, we should scan through
546 : * all the ISA bridge devices and use the first match, instead of
547 : * only checking the first bridge found.
548 : */
549 0 : if (pci_find_device(&pa, intel_pch_match)) {
550 0 : if (PCI_VENDOR(pa.pa_id) == PCI_VENDOR_ID_INTEL) {
551 0 : unsigned short id = PCI_PRODUCT(pa.pa_id) & INTEL_PCH_DEVICE_ID_MASK;
552 0 : dev_priv->pch_id = id;
553 :
554 0 : if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
555 0 : dev_priv->pch_type = PCH_IBX;
556 : DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
557 0 : WARN_ON(!IS_GEN5(dev));
558 0 : } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
559 0 : dev_priv->pch_type = PCH_CPT;
560 : DRM_DEBUG_KMS("Found CougarPoint PCH\n");
561 0 : WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
562 0 : } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
563 : /* PantherPoint is CPT compatible */
564 0 : dev_priv->pch_type = PCH_CPT;
565 : DRM_DEBUG_KMS("Found PantherPoint PCH\n");
566 0 : WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
567 0 : } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
568 0 : dev_priv->pch_type = PCH_LPT;
569 : DRM_DEBUG_KMS("Found LynxPoint PCH\n");
570 0 : WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
571 0 : WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
572 0 : } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
573 0 : dev_priv->pch_type = PCH_LPT;
574 : DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
575 0 : WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
576 0 : WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
577 0 : } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
578 0 : dev_priv->pch_type = PCH_SPT;
579 : DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
580 0 : WARN_ON(!IS_SKYLAKE(dev) &&
581 : !IS_KABYLAKE(dev));
582 0 : } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
583 0 : dev_priv->pch_type = PCH_SPT;
584 : DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
585 0 : WARN_ON(!IS_SKYLAKE(dev) &&
586 : !IS_KABYLAKE(dev));
587 0 : } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
588 0 : dev_priv->pch_type = PCH_KBP;
589 : DRM_DEBUG_KMS("Found KabyPoint PCH\n");
590 0 : WARN_ON(!IS_KABYLAKE(dev));
591 : #ifdef notyet
592 : } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
593 : ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
594 : pch->subsystem_vendor == 0x1af4 &&
595 : pch->subsystem_device == 0x1100)) {
596 : dev_priv->pch_type = intel_virt_detect_pch(dev);
597 : #endif
598 0 : }
599 0 : }
600 : } else
601 : DRM_DEBUG_KMS("No PCH found.\n");
602 0 : }
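intel_detect_pch() classifies the PCH by device-ID family rather than exact product, which is why PCI_PRODUCT() is masked with INTEL_PCH_DEVICE_ID_MASK before the comparisons. A hedged illustration, assuming the mask keeps only the upper byte as the *_DEVICE_ID_TYPE constants suggest:

static int
same_pch_family(unsigned short ida, unsigned short idb)
{
	const unsigned short mask = 0xff00;	/* assumed mask value */

	return (ida & mask) == (idb & mask);
}
/* e.g. same_pch_family(0x8c46, 0x8c5f) is non-zero: with this mask,
 * both hypothetical IDs reduce to the same family id 0x8c00. */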
603 :
604 0 : bool i915_semaphore_is_enabled(struct drm_device *dev)
605 : {
606 0 : if (INTEL_INFO(dev)->gen < 6)
607 0 : return false;
608 :
609 0 : if (i915.semaphores >= 0)
610 0 : return i915.semaphores;
611 :
612 : /* TODO: make semaphores and Execlists play nicely together */
613 0 : if (i915.enable_execlists)
614 0 : return false;
615 :
616 : /* Until we get further testing... */
617 0 : if (IS_GEN8(dev))
618 0 : return false;
619 :
620 : #ifdef CONFIG_INTEL_IOMMU
621 : /* Enable semaphores on SNB when IO remapping is off */
622 : if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
623 : return false;
624 : #endif
625 :
626 0 : return true;
627 0 : }
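The early `i915.semaphores >= 0` check works because the parameter follows the usual tri-state module-parameter convention: negative means "auto", zero forces off, positive forces on. A small sketch of that convention (hypothetical helper, not part of the driver):

static int
tristate_param(int param, int auto_default)
{
	if (param >= 0)
		return param != 0;	/* explicit override: 0 = off, >0 = on */
	return auto_default;		/* negative: "auto", use the default */
}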
628 :
629 0 : void i915_firmware_load_error_print(const char *fw_path, int err)
630 : {
631 0 : DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
632 :
633 : /*
634 : * If the reason is not known assume -ENOENT since that's the most
635 : * usual failure mode.
636 : */
637 0 : if (!err)
638 0 : err = -ENOENT;
639 :
640 0 : if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
641 : return;
642 :
643 0 : DRM_ERROR(
644 : "The driver is built-in, so to load the firmware you need to\n"
645 : "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
646 : "in your initrd/initramfs image.\n");
647 0 : }
648 :
649 0 : static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
650 : {
651 0 : struct drm_device *dev = dev_priv->dev;
652 : struct drm_encoder *encoder;
653 :
654 0 : drm_modeset_lock_all(dev);
655 0 : list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
656 0 : struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
657 :
658 0 : if (intel_encoder->suspend)
659 0 : intel_encoder->suspend(intel_encoder);
660 : }
661 0 : drm_modeset_unlock_all(dev);
662 0 : }
663 :
664 : static int intel_suspend_complete(struct drm_i915_private *dev_priv);
665 : static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
666 : bool rpm_resume);
667 : static int skl_resume_prepare(struct drm_i915_private *dev_priv);
668 : static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
669 :
670 :
671 0 : static int i915_drm_suspend(struct drm_device *dev)
672 : {
673 0 : struct drm_i915_private *dev_priv = dev->dev_private;
674 : pci_power_t opregion_target_state;
675 : int error;
676 :
677 : /* ignore lid events during suspend */
678 0 : mutex_lock(&dev_priv->modeset_restore_lock);
679 0 : dev_priv->modeset_restore = MODESET_SUSPENDED;
680 0 : mutex_unlock(&dev_priv->modeset_restore_lock);
681 :
682 : /* We do a lot of poking in a lot of registers; make sure they work
683 : * properly. */
684 0 : intel_display_set_init_power(dev_priv, true);
685 :
686 0 : drm_kms_helper_poll_disable(dev);
687 :
688 : pci_save_state(dev->pdev);
689 :
690 0 : error = i915_gem_suspend(dev);
691 0 : if (error) {
692 0 : dev_err(&dev->pdev->dev,
693 : "GEM idle failed, resume might fail\n");
694 0 : return error;
695 : }
696 :
697 : #ifdef notyet
698 : intel_guc_suspend(dev);
699 : #endif
700 :
701 0 : intel_suspend_gt_powersave(dev);
702 :
703 : /*
704 : * Disable CRTCs directly since we want to preserve sw state
705 : * for _thaw. Also, power gate the CRTC power wells.
706 : */
707 0 : drm_modeset_lock_all(dev);
708 0 : intel_display_suspend(dev);
709 0 : drm_modeset_unlock_all(dev);
710 :
711 0 : intel_dp_mst_suspend(dev);
712 :
713 0 : intel_runtime_pm_disable_interrupts(dev_priv);
714 0 : intel_hpd_cancel_work(dev_priv);
715 :
716 0 : intel_suspend_encoders(dev_priv);
717 :
718 0 : intel_suspend_hw(dev);
719 :
720 0 : i915_gem_suspend_gtt_mappings(dev);
721 :
722 0 : i915_save_state(dev);
723 :
724 : opregion_target_state = PCI_D3cold;
725 : #if IS_ENABLED(CONFIG_ACPI_SLEEP)
726 : if (acpi_target_system_state() < ACPI_STATE_S3)
727 : opregion_target_state = PCI_D1;
728 : #endif
729 0 : intel_opregion_notify_adapter(dev, opregion_target_state);
730 :
731 0 : intel_uncore_forcewake_reset(dev, false);
732 0 : intel_opregion_fini(dev);
733 :
734 : #ifdef __linux__
735 : intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
736 : #endif
737 :
738 0 : dev_priv->suspend_count++;
739 :
740 0 : intel_display_set_init_power(dev_priv, false);
741 :
742 0 : return 0;
743 0 : }
744 :
745 0 : static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
746 : {
747 0 : struct drm_i915_private *dev_priv = drm_dev->dev_private;
748 : int ret;
749 :
750 0 : ret = intel_suspend_complete(dev_priv);
751 :
752 0 : if (ret) {
753 0 : DRM_ERROR("Suspend complete failed: %d\n", ret);
754 :
755 0 : return ret;
756 : }
757 :
758 : pci_disable_device(drm_dev->pdev);
759 : #ifdef notyet
760 : /*
761 : * During hibernation on some platforms the BIOS may try to access
762 : * the device even though it's already in D3 and hang the machine. So
763 : * leave the device in D0 on those platforms and hope the BIOS will
764 : * power down the device properly. The issue was seen on multiple old
765 : * GENs with different BIOS vendors, so having an explicit blacklist
766 : * is impractical; apply the workaround on everything pre GEN6. The
767 : * platforms where the issue was seen:
768 : * Lenovo Thinkpad X301, X61s, X60, T60, X41
769 : * Fujitsu FSC S7110
770 : * Acer Aspire 1830T
771 : */
772 : if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
773 : pci_set_power_state(drm_dev->pdev, PCI_D3hot);
774 : #endif
775 :
776 0 : return 0;
777 0 : }
778 :
779 : #ifdef __linux__
780 : int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
781 : {
782 : int error;
783 :
784 : if (!dev || !dev->dev_private) {
785 : DRM_ERROR("dev: %p\n", dev);
786 : DRM_ERROR("DRM not initialized, aborting suspend.\n");
787 : return -ENODEV;
788 : }
789 :
790 : if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
791 : state.event != PM_EVENT_FREEZE))
792 : return -EINVAL;
793 :
794 : if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
795 : return 0;
796 :
797 : error = i915_drm_suspend(dev);
798 : if (error)
799 : return error;
800 :
801 : return i915_drm_suspend_late(dev, false);
802 : }
803 : #endif
804 :
805 0 : static int i915_drm_resume(struct drm_device *dev)
806 : {
807 0 : struct drm_i915_private *dev_priv = dev->dev_private;
808 :
809 0 : mutex_lock(&dev->struct_mutex);
810 0 : i915_gem_restore_gtt_mappings(dev);
811 0 : mutex_unlock(&dev->struct_mutex);
812 :
813 0 : i915_restore_state(dev);
814 0 : intel_opregion_setup(dev);
815 :
816 0 : intel_init_pch_refclk(dev);
817 0 : drm_mode_config_reset(dev);
818 :
819 : /*
820 : * Interrupts have to be enabled before any batches are run. If not, the
821 : * GPU will hang. i915_gem_init_hw() will initiate batches to
822 : * update/restore the context.
823 : *
824 : * Modeset enabling in intel_modeset_init_hw() also needs working
825 : * interrupts.
826 : */
827 0 : intel_runtime_pm_enable_interrupts(dev_priv);
828 :
829 0 : mutex_lock(&dev->struct_mutex);
830 0 : if (i915_gem_init_hw(dev)) {
831 0 : DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
832 0 : atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
833 0 : }
834 0 : mutex_unlock(&dev->struct_mutex);
835 :
836 : #ifdef notyet
837 : intel_guc_resume(dev);
838 : #endif
839 :
840 0 : intel_modeset_init_hw(dev);
841 :
842 0 : spin_lock_irq(&dev_priv->irq_lock);
843 0 : if (dev_priv->display.hpd_irq_setup)
844 0 : dev_priv->display.hpd_irq_setup(dev);
845 0 : spin_unlock_irq(&dev_priv->irq_lock);
846 :
847 0 : drm_modeset_lock_all(dev);
848 0 : intel_display_resume(dev);
849 0 : drm_modeset_unlock_all(dev);
850 :
851 0 : intel_dp_mst_resume(dev);
852 :
853 : /*
854 : * ... but also need to make sure that hotplug processing
855 : * doesn't cause havoc. Like in the driver load code we don't
856 : * bother with the tiny race here where we might lose hotplug
857 : * notifications.
858 : */
859 0 : intel_hpd_init(dev_priv);
860 : /* Config may have changed between suspend and resume */
861 0 : drm_helper_hpd_irq_event(dev);
862 :
863 0 : intel_opregion_init(dev);
864 :
865 : #ifdef __linux__
866 : intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
867 : #endif
868 :
869 0 : mutex_lock(&dev_priv->modeset_restore_lock);
870 0 : dev_priv->modeset_restore = MODESET_DONE;
871 0 : mutex_unlock(&dev_priv->modeset_restore_lock);
872 :
873 0 : intel_opregion_notify_adapter(dev, PCI_D0);
874 :
875 0 : drm_kms_helper_poll_enable(dev);
876 :
877 0 : return 0;
878 : }
879 :
880 0 : static int i915_drm_resume_early(struct drm_device *dev)
881 : {
882 0 : struct drm_i915_private *dev_priv = dev->dev_private;
883 : int ret = 0;
884 :
885 : /*
886 : * We have a resume ordering issue with the snd-hda driver also
887 : * requiring our device to be powered up. Due to the lack of a
888 : * parent/child relationship we currently solve this with an early
889 : * resume hook.
890 : *
891 : * FIXME: This should be solved with a special hdmi sink device or
892 : * similar so that power domains can be employed.
893 : */
894 : if (pci_enable_device(dev->pdev))
895 : return -EIO;
896 :
897 : pci_set_master(dev->pdev);
898 :
899 0 : if (IS_VALLEYVIEW(dev_priv))
900 0 : ret = vlv_resume_prepare(dev_priv, false);
901 0 : if (ret)
902 0 : DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
903 : ret);
904 :
905 0 : intel_uncore_early_sanitize(dev, true);
906 :
907 0 : if (IS_BROXTON(dev))
908 0 : ret = bxt_resume_prepare(dev_priv);
909 0 : else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
910 0 : ret = skl_resume_prepare(dev_priv);
911 0 : else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
912 0 : hsw_disable_pc8(dev_priv);
913 :
914 0 : intel_uncore_sanitize(dev);
915 0 : intel_power_domains_init_hw(dev_priv);
916 :
917 0 : return ret;
918 : }
919 :
920 : #ifdef __linux__
921 : int i915_resume_switcheroo(struct drm_device *dev)
922 : {
923 : int ret;
924 :
925 : if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
926 : return 0;
927 :
928 : ret = i915_drm_resume_early(dev);
929 : if (ret)
930 : return ret;
931 :
932 : return i915_drm_resume(dev);
933 : }
934 : #endif
935 :
936 : /**
937 : * i915_reset - reset chip after a hang
938 : * @dev: drm device to reset
939 : *
940 : * Reset the chip. Useful if a hang is detected. Returns zero on successful
941 : * reset or otherwise an error code.
942 : *
943 : * Procedure is fairly simple:
944 : * - reset the chip using the reset reg
945 : * - re-init context state
946 : * - re-init hardware status page
947 : * - re-init ring buffer
948 : * - re-init interrupt state
949 : * - re-init display
950 : */
951 0 : int i915_reset(struct drm_device *dev)
952 : {
953 0 : struct drm_i915_private *dev_priv = dev->dev_private;
954 : bool simulated;
955 : int ret;
956 :
957 0 : intel_reset_gt_powersave(dev);
958 :
959 0 : mutex_lock(&dev->struct_mutex);
960 :
961 0 : i915_gem_reset(dev);
962 :
963 0 : simulated = dev_priv->gpu_error.stop_rings != 0;
964 :
965 0 : ret = intel_gpu_reset(dev);
966 :
967 : /* Also reset the gpu hangman. */
968 0 : if (simulated) {
969 : DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
970 0 : dev_priv->gpu_error.stop_rings = 0;
971 0 : if (ret == -ENODEV) {
972 : DRM_INFO("Reset not implemented, but ignoring "
973 : "error for simulated gpu hangs\n");
974 : ret = 0;
975 0 : }
976 : }
977 :
978 0 : if (i915_stop_ring_allow_warn(dev_priv))
979 0 : pr_notice("drm/i915: Resetting chip after gpu hang\n");
980 :
981 0 : if (ret) {
982 0 : DRM_ERROR("Failed to reset chip: %i\n", ret);
983 0 : mutex_unlock(&dev->struct_mutex);
984 0 : return ret;
985 : }
986 :
987 0 : intel_overlay_reset(dev_priv);
988 :
989 : /* Ok, now get things going again... */
990 :
991 : /*
992 : * Everything depends on having the GTT running, so we need to start
993 : * there. Fortunately we don't need to do this unless we reset the
994 : * chip at a PCI level.
995 : *
996 : * Next we need to restore the context, but we don't use those
997 : * yet either...
998 : *
999 : * Ring buffer needs to be re-initialized in the KMS case, or if X
1000 : * was running at the time of the reset (i.e. we weren't VT
1001 : * switched away).
1002 : */
1003 :
1004 : /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
1005 0 : dev_priv->gpu_error.reload_in_reset = true;
1006 :
1007 0 : ret = i915_gem_init_hw(dev);
1008 :
1009 0 : dev_priv->gpu_error.reload_in_reset = false;
1010 :
1011 0 : mutex_unlock(&dev->struct_mutex);
1012 0 : if (ret) {
1013 0 : DRM_ERROR("Failed hw init on reset %d\n", ret);
1014 0 : return ret;
1015 : }
1016 :
1017 : /*
1018 : * rps/rc6 re-init is necessary to restore state lost after the
1019 : * reset and the re-install of gt irqs. Skip for ironlake per
1020 : * previous concerns that it doesn't respond well to some forms
1021 : * of re-init after reset.
1022 : */
1023 0 : if (INTEL_INFO(dev)->gen > 5)
1024 0 : intel_enable_gt_powersave(dev);
1025 :
1026 0 : return 0;
1027 0 : }
1028 :
1029 : #ifdef __linux__
1030 : static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1031 : {
1032 : struct intel_device_info *intel_info =
1033 : (struct intel_device_info *) ent->driver_data;
1034 :
1035 : if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
1036 : DRM_INFO("This hardware requires preliminary hardware support.\n"
1037 : "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
1038 : return -ENODEV;
1039 : }
1040 :
1041 : /* Only bind to function 0 of the device. Early generations
1042 : * used function 1 as a placeholder for multi-head. That only
1043 : * causes us confusion, especially on systems where both
1044 : * functions have the same PCI-ID!
1045 : */
1046 : if (PCI_FUNC(pdev->devfn))
1047 : return -ENODEV;
1048 :
1049 : return drm_get_pci_dev(pdev, ent, &driver);
1050 : }
1051 :
1052 : static void
1053 : i915_pci_remove(struct pci_dev *pdev)
1054 : {
1055 : struct drm_device *dev = pci_get_drvdata(pdev);
1056 :
1057 : drm_put_dev(dev);
1058 : }
1059 :
1060 : static int i915_pm_suspend(struct device *dev)
1061 : {
1062 : struct pci_dev *pdev = to_pci_dev(dev);
1063 : struct drm_device *drm_dev = pci_get_drvdata(pdev);
1064 :
1065 : if (!drm_dev || !drm_dev->dev_private) {
1066 : dev_err(dev, "DRM not initialized, aborting suspend.\n");
1067 : return -ENODEV;
1068 : }
1069 :
1070 : if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1071 : return 0;
1072 :
1073 : return i915_drm_suspend(drm_dev);
1074 : }
1075 :
1076 : static int i915_pm_suspend_late(struct device *dev)
1077 : {
1078 : struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1079 :
1080 : /*
1081 : * We have a suspend ordering issue with the snd-hda driver also
1082 : * requiring our device to be powered up. Due to the lack of a
1083 : * parent/child relationship we currently solve this with a late
1084 : * suspend hook.
1085 : *
1086 : * FIXME: This should be solved with a special hdmi sink device or
1087 : * similar so that power domains can be employed.
1088 : */
1089 : if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1090 : return 0;
1091 :
1092 : return i915_drm_suspend_late(drm_dev, false);
1093 : }
1094 :
1095 : static int i915_pm_poweroff_late(struct device *dev)
1096 : {
1097 : struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1098 :
1099 : if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1100 : return 0;
1101 :
1102 : return i915_drm_suspend_late(drm_dev, true);
1103 : }
1104 :
1105 : static int i915_pm_resume_early(struct device *dev)
1106 : {
1107 : struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1108 :
1109 : if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1110 : return 0;
1111 :
1112 : return i915_drm_resume_early(drm_dev);
1113 : }
1114 :
1115 : static int i915_pm_resume(struct device *dev)
1116 : {
1117 : struct drm_device *drm_dev = dev_to_i915(dev)->dev;
1118 :
1119 : if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1120 : return 0;
1121 :
1122 : return i915_drm_resume(drm_dev);
1123 : }
1124 : #endif
1125 :
1126 0 : static int skl_suspend_complete(struct drm_i915_private *dev_priv)
1127 : {
1128 : /* Enabling DC6 is not a hard requirement to enter runtime D3 */
1129 :
1130 0 : skl_uninit_cdclk(dev_priv);
1131 :
1132 0 : return 0;
1133 : }
1134 :
1135 0 : static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1136 : {
1137 0 : hsw_enable_pc8(dev_priv);
1138 :
1139 0 : return 0;
1140 : }
1141 :
1142 0 : static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1143 : {
1144 0 : struct drm_device *dev = dev_priv->dev;
1145 :
1146 : /* TODO: when DC5 support is added disable DC5 here. */
1147 :
1148 0 : broxton_ddi_phy_uninit(dev);
1149 0 : broxton_uninit_cdclk(dev);
1150 0 : bxt_enable_dc9(dev_priv);
1151 :
1152 0 : return 0;
1153 : }
1154 :
1155 0 : static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1156 : {
1157 0 : struct drm_device *dev = dev_priv->dev;
1158 :
1159 : /* TODO: when CSR FW support is added make sure the FW is loaded */
1160 :
1161 0 : bxt_disable_dc9(dev_priv);
1162 :
1163 : /*
1164 : * TODO: when DC5 support is added enable DC5 here if the CSR FW
1165 : * is available.
1166 : */
1167 0 : broxton_init_cdclk(dev);
1168 0 : broxton_ddi_phy_init(dev);
1169 0 : intel_prepare_ddi(dev);
1170 :
1171 0 : return 0;
1172 : }
1173 :
1174 0 : static int skl_resume_prepare(struct drm_i915_private *dev_priv)
1175 : {
1176 0 : struct drm_device *dev = dev_priv->dev;
1177 :
1178 0 : skl_init_cdclk(dev_priv);
1179 0 : intel_csr_load_program(dev);
1180 :
1181 0 : return 0;
1182 : }
1183 :
1184 : /*
1185 : * Save all Gunit registers that may be lost after a D3 and a subsequent
1186 : * S0i[R123] transition. The list of registers needing a save/restore is
1187 : * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1188 : * registers in the following way:
1189 : * - Driver: saved/restored by the driver
1190 : * - Punit : saved/restored by the Punit firmware
1191 : * - No, w/o marking: no need to save/restore, since the register is R/O or
1192 : * used internally by the HW in a way that doesn't depend on
1193 : * keeping the content across a suspend/resume.
1194 : * - Debug : used for debugging
1195 : *
1196 : * We save/restore all registers marked with 'Driver', with the following
1197 : * exceptions:
1198 : * - Registers out of use, including also registers marked with 'Debug'.
1199 : * These have no effect on the driver's operation, so we don't save/restore
1200 : * them to reduce the overhead.
1201 : * - Registers that are fully setup by an initialization function called from
1202 : * the resume path. For example many clock gating and RPS/RC6 registers.
1203 : * - Registers that provide the right functionality with their reset defaults.
1204 : *
1205 : * TODO: Except for registers that based on the above 3 criteria can be safely
1206 : * ignored, we save/restore all others, practically treating the HW context as
1207 : * a black-box for the driver. Further investigation is needed to reduce the
1208 : * saved/restored registers even further, by following the same 3 criteria.
1209 : */
1210 0 : static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1211 : {
1212 0 : struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1213 : int i;
1214 :
1215 : /* GAM 0x4000-0x4770 */
1216 0 : s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1217 0 : s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1218 0 : s->arb_mode = I915_READ(ARB_MODE);
1219 0 : s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1220 0 : s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1221 :
1222 0 : for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1223 0 : s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
1224 :
1225 0 : s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1226 0 : s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1227 :
1228 0 : s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1229 0 : s->ecochk = I915_READ(GAM_ECOCHK);
1230 0 : s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1231 0 : s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1232 :
1233 0 : s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1234 :
1235 : /* MBC 0x9024-0x91D0, 0x8500 */
1236 0 : s->g3dctl = I915_READ(VLV_G3DCTL);
1237 0 : s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1238 0 : s->mbctl = I915_READ(GEN6_MBCTL);
1239 :
1240 : /* GCP 0x9400-0x9424, 0x8100-0x810C */
1241 0 : s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1242 0 : s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1243 0 : s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1244 0 : s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1245 0 : s->rstctl = I915_READ(GEN6_RSTCTL);
1246 0 : s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1247 :
1248 : /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1249 0 : s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1250 0 : s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1251 0 : s->rpdeuc = I915_READ(GEN6_RPDEUC);
1252 0 : s->ecobus = I915_READ(ECOBUS);
1253 0 : s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1254 0 : s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1255 0 : s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1256 0 : s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1257 0 : s->rcedata = I915_READ(VLV_RCEDATA);
1258 0 : s->spare2gh = I915_READ(VLV_SPAREG2H);
1259 :
1260 : /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1261 0 : s->gt_imr = I915_READ(GTIMR);
1262 0 : s->gt_ier = I915_READ(GTIER);
1263 0 : s->pm_imr = I915_READ(GEN6_PMIMR);
1264 0 : s->pm_ier = I915_READ(GEN6_PMIER);
1265 :
1266 0 : for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1267 0 : s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
1268 :
1269 : /* GT SA CZ domain, 0x100000-0x138124 */
1270 0 : s->tilectl = I915_READ(TILECTL);
1271 0 : s->gt_fifoctl = I915_READ(GTFIFOCTL);
1272 0 : s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1273 0 : s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1274 0 : s->pmwgicz = I915_READ(VLV_PMWGICZ);
1275 :
1276 : /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1277 0 : s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1278 0 : s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1279 0 : s->pcbr = I915_READ(VLV_PCBR);
1280 0 : s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1281 :
1282 : /*
1283 : * Not saving any of:
1284 : * DFT, 0x9800-0x9EC0
1285 : * SARB, 0xB000-0xB1FC
1286 : * GAC, 0x5208-0x524C, 0x14000-0x14C000
1287 : * PCI CFG
1288 : */
1289 0 : }
1290 :
1291 0 : static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1292 : {
1293 0 : struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1294 : u32 val;
1295 : int i;
1296 :
1297 : /* GAM 0x4000-0x4770 */
1298 0 : I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1299 0 : I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1300 0 : I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1301 0 : I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1302 0 : I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1303 :
1304 0 : for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1305 0 : I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
1306 :
1307 0 : I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1308 0 : I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1309 :
1310 0 : I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1311 0 : I915_WRITE(GAM_ECOCHK, s->ecochk);
1312 0 : I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1313 0 : I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1314 :
1315 0 : I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1316 :
1317 : /* MBC 0x9024-0x91D0, 0x8500 */
1318 0 : I915_WRITE(VLV_G3DCTL, s->g3dctl);
1319 0 : I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1320 0 : I915_WRITE(GEN6_MBCTL, s->mbctl);
1321 :
1322 : /* GCP 0x9400-0x9424, 0x8100-0x810C */
1323 0 : I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1324 0 : I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1325 0 : I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1326 0 : I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1327 0 : I915_WRITE(GEN6_RSTCTL, s->rstctl);
1328 0 : I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1329 :
1330 : /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1331 0 : I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1332 0 : I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1333 0 : I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1334 0 : I915_WRITE(ECOBUS, s->ecobus);
1335 0 : I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1336 0 : I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
1337 0 : I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1338 0 : I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1339 0 : I915_WRITE(VLV_RCEDATA, s->rcedata);
1340 0 : I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1341 :
1342 : /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1343 0 : I915_WRITE(GTIMR, s->gt_imr);
1344 0 : I915_WRITE(GTIER, s->gt_ier);
1345 0 : I915_WRITE(GEN6_PMIMR, s->pm_imr);
1346 0 : I915_WRITE(GEN6_PMIER, s->pm_ier);
1347 :
1348 0 : for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1349 0 : I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
1350 :
1351 : /* GT SA CZ domain, 0x100000-0x138124 */
1352 0 : I915_WRITE(TILECTL, s->tilectl);
1353 0 : I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1354 : /*
1355 : * Preserve the GT allow wake and GFX force clock bit, they are not
1356 : * to be restored, as they are used to control the s0ix suspend/resume
1357 : * sequence by the caller.
1358 : */
1359 0 : val = I915_READ(VLV_GTLC_WAKE_CTRL);
1360 0 : val &= VLV_GTLC_ALLOWWAKEREQ;
1361 0 : val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1362 0 : I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1363 :
1364 0 : val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1365 0 : val &= VLV_GFX_CLK_FORCE_ON_BIT;
1366 0 : val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1367 0 : I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1368 :
1369 0 : I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1370 :
1371 : /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1372 0 : I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1373 0 : I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1374 0 : I915_WRITE(VLV_PCBR, s->pcbr);
1375 0 : I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1376 0 : }
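The two read-modify-write blocks at the end of the restore above share a merge idiom: the caller-owned control bit keeps its live value while every other bit comes from the saved snapshot. Distilled into a generic helper (an illustration only, not driver code):

static unsigned int
merge_preserving(unsigned int live, unsigned int saved, unsigned int keep)
{
	/* bits in 'keep' come from the current register value;
	 * all remaining bits come from the saved snapshot */
	return (live & keep) | (saved & ~keep);
}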
1377 :
1378 0 : int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1379 : {
1380 : u32 val;
1381 : int err;
1382 :
1383 : #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1384 :
1385 0 : val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1386 0 : val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1387 0 : if (force_on)
1388 0 : val |= VLV_GFX_CLK_FORCE_ON_BIT;
1389 0 : I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1390 :
1391 0 : if (!force_on)
1392 0 : return 0;
1393 :
1394 0 : err = wait_for(COND, 20);
1395 0 : if (err)
1396 0 : DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1397 : I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1398 :
1399 0 : return err;
1400 : #undef COND
1401 0 : }
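The `wait_for(COND, 20)` call above is drm's poll-with-timeout idiom: keep re-evaluating the condition until it holds or the millisecond timeout expires, then report -ETIMEDOUT. A hedged approximation of its shape, assuming a ~1 ms polling granularity built on the kernel's microsecond delay():

#define POLL_FOR(cond, timeout_ms, err)			\
do {							\
	int remaining = (timeout_ms);			\
	(err) = -ETIMEDOUT;				\
	while (remaining-- >= 0) {			\
		if (cond) {				\
			(err) = 0;			\
			break;				\
		}					\
		delay(1000);	/* spin for ~1 ms */	\
	}						\
} while (0)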
1402 :
1403 0 : static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1404 : {
1405 : u32 val;
1406 : int err = 0;
1407 :
1408 0 : val = I915_READ(VLV_GTLC_WAKE_CTRL);
1409 0 : val &= ~VLV_GTLC_ALLOWWAKEREQ;
1410 0 : if (allow)
1411 0 : val |= VLV_GTLC_ALLOWWAKEREQ;
1412 0 : I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1413 0 : POSTING_READ(VLV_GTLC_WAKE_CTRL);
1414 :
1415 : #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1416 : allow)
1417 0 : err = wait_for(COND, 1);
1418 0 : if (err)
1419 0 : DRM_ERROR("timeout disabling GT waking\n");
1420 0 : return err;
1421 : #undef COND
1422 : }
1423 :
1424 0 : static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1425 : bool wait_for_on)
1426 : {
1427 : u32 mask;
1428 : u32 val;
1429 : int err;
1430 :
1431 : mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1432 0 : val = wait_for_on ? mask : 0;
1433 : #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1434 0 : if (COND)
1435 0 : return 0;
1436 :
1437 : DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1438 : wait_for_on ? "on" : "off",
1439 : I915_READ(VLV_GTLC_PW_STATUS));
1440 :
1441 : /*
1442 : * RC6 transitioning can be delayed up to 2 msec (see
1443 : * valleyview_enable_rps); use 3 msec for safety.
1444 : */
1445 0 : err = wait_for(COND, 3);
1446 0 : if (err)
1447 0 : DRM_ERROR("timeout waiting for GT wells to go %s\n",
1448 : wait_for_on ? "on" : "off");
1449 :
1450 0 : return err;
1451 : #undef COND
1452 0 : }
1453 :
1454 0 : static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1455 : {
1456 0 : if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1457 : return;
1458 :
1459 0 : DRM_ERROR("GT register access while GT waking disabled\n");
1460 0 : I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1461 0 : }
1462 :
1463 0 : static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1464 : {
1465 : u32 mask;
1466 : int err;
1467 :
1468 : /*
1469 : * Bspec defines the following GT well-on flags as debug only, so
1470 : * don't treat them as hard failures.
1471 : */
1472 0 : (void)vlv_wait_for_gt_wells(dev_priv, false);
1473 :
1474 : mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1475 0 : WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1476 :
1477 0 : vlv_check_no_gt_access(dev_priv);
1478 :
1479 0 : err = vlv_force_gfx_clock(dev_priv, true);
1480 0 : if (err)
1481 : goto err1;
1482 :
1483 0 : err = vlv_allow_gt_wake(dev_priv, false);
1484 0 : if (err)
1485 : goto err2;
1486 :
1487 0 : if (!IS_CHERRYVIEW(dev_priv->dev))
1488 0 : vlv_save_gunit_s0ix_state(dev_priv);
1489 :
1490 0 : err = vlv_force_gfx_clock(dev_priv, false);
1491 0 : if (err)
1492 : goto err2;
1493 :
1494 0 : return 0;
1495 :
1496 : err2:
1497 : /* For safety always re-enable waking and disable gfx clock forcing */
1498 0 : vlv_allow_gt_wake(dev_priv, true);
1499 : err1:
1500 0 : vlv_force_gfx_clock(dev_priv, false);
1501 :
1502 0 : return err;
1503 0 : }
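The err1/err2 labels above follow the kernel's goto-unwind pattern: each label undoes only the steps that had already succeeded, in reverse order, so a failure midway leaves the hardware consistent. A minimal self-contained sketch of the shape, with placeholder step functions:

static int step_a(void) { return 0; }	/* placeholders for real work */
static int step_b(void) { return 0; }
static void undo_a(void) { }

static int
do_two_steps(void)
{
	int err;

	err = step_a();
	if (err)
		return err;	/* nothing succeeded yet, nothing to undo */

	err = step_b();
	if (err)
		goto err_a;	/* roll back step_a only */

	return 0;

err_a:
	undo_a();
	return err;
}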
1504 :
1505 0 : static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1506 : bool rpm_resume)
1507 : {
1508 0 : struct drm_device *dev = dev_priv->dev;
1509 : int err;
1510 : int ret;
1511 :
1512 : /*
1513 : * If any of the steps fails, just try to continue; that's the best we
1514 : * can do at this point. Return the first error code (which will also
1515 : * leave RPM permanently disabled).
1516 : */
1517 0 : ret = vlv_force_gfx_clock(dev_priv, true);
1518 :
1519 0 : if (!IS_CHERRYVIEW(dev_priv->dev))
1520 0 : vlv_restore_gunit_s0ix_state(dev_priv);
1521 :
1522 0 : err = vlv_allow_gt_wake(dev_priv, true);
1523 0 : if (!ret)
1524 0 : ret = err;
1525 :
1526 0 : err = vlv_force_gfx_clock(dev_priv, false);
1527 0 : if (!ret)
1528 0 : ret = err;
1529 :
1530 0 : vlv_check_no_gt_access(dev_priv);
1531 :
1532 0 : if (rpm_resume) {
1533 0 : intel_init_clock_gating(dev);
1534 0 : i915_gem_restore_fences(dev);
1535 0 : }
1536 :
1537 0 : return ret;
1538 : }
1539 :
1540 : #ifdef __linux__
1541 : static int intel_runtime_suspend(struct device *device)
1542 : {
1543 : struct pci_dev *pdev = to_pci_dev(device);
1544 : struct drm_device *dev = pci_get_drvdata(pdev);
1545 : struct drm_i915_private *dev_priv = dev->dev_private;
1546 : int ret;
1547 :
1548 : if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1549 : return -ENODEV;
1550 :
1551 : if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1552 : return -ENODEV;
1553 :
1554 : DRM_DEBUG_KMS("Suspending device\n");
1555 :
1556 : /*
1557 : * We could deadlock here in case another thread holding struct_mutex
1558 : * calls RPM suspend concurrently, since the RPM suspend will wait
1559 : * first for this RPM suspend to finish. In this case the concurrent
1560 : * RPM resume will be followed by its RPM suspend counterpart. Still
1561 : * for consistency return -EAGAIN, which will reschedule this suspend.
1562 : */
1563 : if (!mutex_trylock(&dev->struct_mutex)) {
1564 : DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1565 : /*
1566 : * Bump the expiration timestamp, otherwise the suspend won't
1567 : * be rescheduled.
1568 : */
1569 : pm_runtime_mark_last_busy(device);
1570 :
1571 : return -EAGAIN;
1572 : }
1573 : /*
1574 : * We are safe here against re-faults, since the fault handler takes
1575 : * an RPM reference.
1576 : */
1577 : i915_gem_release_all_mmaps(dev_priv);
1578 : mutex_unlock(&dev->struct_mutex);
1579 :
1580 : intel_guc_suspend(dev);
1581 :
1582 : intel_suspend_gt_powersave(dev);
1583 : intel_runtime_pm_disable_interrupts(dev_priv);
1584 :
1585 : ret = intel_suspend_complete(dev_priv);
1586 : if (ret) {
1587 : DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1588 : intel_runtime_pm_enable_interrupts(dev_priv);
1589 :
1590 : return ret;
1591 : }
1592 :
1593 : cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1594 : intel_uncore_forcewake_reset(dev, false);
1595 : dev_priv->pm.suspended = true;
1596 :
1597 : /*
1598 : * FIXME: We really should find a document that references the arguments
1599 : * used below!
1600 : */
1601 : if (IS_BROADWELL(dev)) {
1602 : /*
1603 : * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1604 : * being detected, and the call we do at intel_runtime_resume()
1605 : * won't be able to restore them. Since PCI_D3hot matches the
1606 : * actual specification and appears to be working, use it.
1607 : */
1608 : intel_opregion_notify_adapter(dev, PCI_D3hot);
1609 : } else {
1610 : /*
1611 : * Current versions of firmware which depend on this opregion
1612 : * notification have repurposed the D1 definition to mean
1613 : * "runtime suspended" vs. what you would normally expect (D3)
1614 : * to distinguish it from notifications that might be sent via
1615 : * the suspend path.
1616 : */
1617 : intel_opregion_notify_adapter(dev, PCI_D1);
1618 : }
1619 :
1620 : assert_forcewakes_inactive(dev_priv);
1621 :
1622 : DRM_DEBUG_KMS("Device suspended\n");
1623 : return 0;
1624 : }
1625 :
1626 : static int intel_runtime_resume(struct device *device)
1627 : {
1628 : struct pci_dev *pdev = to_pci_dev(device);
1629 : struct drm_device *dev = pci_get_drvdata(pdev);
1630 : struct drm_i915_private *dev_priv = dev->dev_private;
1631 : int ret = 0;
1632 :
1633 : if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1634 : return -ENODEV;
1635 :
1636 : DRM_DEBUG_KMS("Resuming device\n");
1637 :
1638 : intel_opregion_notify_adapter(dev, PCI_D0);
1639 : dev_priv->pm.suspended = false;
1640 :
1641 : intel_guc_resume(dev);
1642 :
1643 : if (IS_GEN6(dev_priv))
1644 : intel_init_pch_refclk(dev);
1645 :
1646 : if (IS_BROXTON(dev))
1647 : ret = bxt_resume_prepare(dev_priv);
1648 : else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
1649 : ret = skl_resume_prepare(dev_priv);
1650 : else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1651 : hsw_disable_pc8(dev_priv);
1652 : else if (IS_VALLEYVIEW(dev_priv))
1653 : ret = vlv_resume_prepare(dev_priv, true);
1654 :
1655 : /*
1656 : * No point in rolling things back in case of an error, as the best
1657 : * we can do is to hope that things will still work (and disable RPM).
1658 : */
1659 : i915_gem_init_swizzling(dev);
1660 : gen6_update_ring_freq(dev);
1661 :
1662 : intel_runtime_pm_enable_interrupts(dev_priv);
1663 :
1664 : /*
1665 : * On VLV/CHV display interrupts are part of the display
1666 : * power well, so hpd is reinitialized from there. For
1667 : * everyone else do it here.
1668 : */
1669 : if (!IS_VALLEYVIEW(dev_priv))
1670 : intel_hpd_init(dev_priv);
1671 :
1672 : intel_enable_gt_powersave(dev);
1673 :
1674 : if (ret)
1675 : DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1676 : else
1677 : DRM_DEBUG_KMS("Device resumed\n");
1678 :
1679 : return ret;
1680 : }
1681 : #endif
1682 :
1683 : /*
1684 : * This function implements common functionality of runtime and system
1685 : * suspend sequence.
1686 : */
1687 0 : static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1688 : {
1689 : int ret;
1690 :
1691 0 : if (IS_BROXTON(dev_priv))
1692 0 : ret = bxt_suspend_complete(dev_priv);
1693 0 : else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1694 0 : ret = skl_suspend_complete(dev_priv);
1695 0 : else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1696 0 : ret = hsw_suspend_complete(dev_priv);
1697 0 : else if (IS_VALLEYVIEW(dev_priv))
1698 0 : ret = vlv_suspend_complete(dev_priv);
1699 : else
1700 : ret = 0;
1701 :
1702 0 : return ret;
1703 : }
1704 :
1705 : #ifdef __linux__
1706 : static const struct dev_pm_ops i915_pm_ops = {
1707 : /*
1708 : * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1709 : * PMSG_RESUME]
1710 : */
1711 : .suspend = i915_pm_suspend,
1712 : .suspend_late = i915_pm_suspend_late,
1713 : .resume_early = i915_pm_resume_early,
1714 : .resume = i915_pm_resume,
1715 :
1716 : /*
1717 : * S4 event handlers
1718 : * @freeze, @freeze_late : called (1) before creating the
1719 : * hibernation image [PMSG_FREEZE] and
1720 : * (2) after rebooting, before restoring
1721 : * the image [PMSG_QUIESCE]
1722 : * @thaw, @thaw_early : called (1) after creating the hibernation
1723 : * image, before writing it [PMSG_THAW]
1724 : * and (2) after failing to create or
1725 : * restore the image [PMSG_RECOVER]
1726 : * @poweroff, @poweroff_late: called after writing the hibernation
1727 : * image, before rebooting [PMSG_HIBERNATE]
1728 : * @restore, @restore_early : called after rebooting and restoring the
1729 : * hibernation image [PMSG_RESTORE]
1730 : */
1731 : .freeze = i915_pm_suspend,
1732 : .freeze_late = i915_pm_suspend_late,
1733 : .thaw_early = i915_pm_resume_early,
1734 : .thaw = i915_pm_resume,
1735 : .poweroff = i915_pm_suspend,
1736 : .poweroff_late = i915_pm_poweroff_late,
1737 : .restore_early = i915_pm_resume_early,
1738 : .restore = i915_pm_resume,
1739 :
1740 : /* S0ix (via runtime suspend) event handlers */
1741 : .runtime_suspend = intel_runtime_suspend,
1742 : .runtime_resume = intel_runtime_resume,
1743 : };
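
/*
 * For orientation only: per the comment above, a successful
 * hibernation exercises the S4 handlers in roughly this order
 * (the ordering is the PM core's, nothing here enforces it):
 *
 *	freeze, freeze_late		snapshot image is created
 *	thaw_early, thaw		image is written out
 *	poweroff, poweroff_late		machine powers down
 *	-- reboot --
 *	freeze, freeze_late		quiesce before restoring the image
 *	restore_early, restore		image restored, execution resumes
 */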
1744 :
1745 : static const struct vm_operations_struct i915_gem_vm_ops = {
1746 : .fault = i915_gem_fault,
1747 : .open = drm_gem_vm_open,
1748 : .close = drm_gem_vm_close,
1749 : };
1750 :
1751 : static const struct file_operations i915_driver_fops = {
1752 : .owner = THIS_MODULE,
1753 : .open = drm_open,
1754 : .release = drm_release,
1755 : .unlocked_ioctl = drm_ioctl,
1756 : .mmap = drm_gem_mmap,
1757 : .poll = drm_poll,
1758 : .read = drm_read,
1759 : #ifdef CONFIG_COMPAT
1760 : .compat_ioctl = i915_compat_ioctl,
1761 : #endif
1762 : .llseek = noop_llseek,
1763 : };
1764 : #endif
1765 :
1766 : static struct drm_driver driver = {
1767 : /* Don't use MTRRs here; the Xserver or userspace app should
1768 : * deal with them for Intel hardware.
1769 : */
1770 : .driver_features =
1771 : DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1772 : DRIVER_RENDER | DRIVER_MODESET,
1773 : #ifdef __linux__
1774 : .load = i915_driver_load,
1775 : .unload = i915_driver_unload,
1776 : #endif
1777 : .open = i915_driver_open,
1778 : .lastclose = i915_driver_lastclose,
1779 : .preclose = i915_driver_preclose,
1780 : .postclose = i915_driver_postclose,
1781 : #ifdef __linux__
1782 : .set_busid = drm_pci_set_busid,
1783 : #endif
1784 :
1785 : #if defined(CONFIG_DEBUG_FS)
1786 : .debugfs_init = i915_debugfs_init,
1787 : .debugfs_cleanup = i915_debugfs_cleanup,
1788 : #endif
1789 : .gem_free_object = i915_gem_free_object,
1790 : #ifdef __linux__
1791 : .gem_vm_ops = &i915_gem_vm_ops,
1792 : #else
1793 : .gem_fault = i915_gem_fault,
1794 : #endif
1795 :
1796 : .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1797 : .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1798 : .gem_prime_export = i915_gem_prime_export,
1799 : .gem_prime_import = i915_gem_prime_import,
1800 :
1801 : .dumb_create = i915_gem_dumb_create,
1802 : .dumb_map_offset = i915_gem_mmap_gtt,
1803 : .dumb_destroy = drm_gem_dumb_destroy,
1804 : .ioctls = i915_ioctls,
1805 : #ifdef __linux__
1806 : .fops = &i915_driver_fops,
1807 : #endif
1808 : .name = DRIVER_NAME,
1809 : .desc = DRIVER_DESC,
1810 : .date = DRIVER_DATE,
1811 : .major = DRIVER_MAJOR,
1812 : .minor = DRIVER_MINOR,
1813 : .patchlevel = DRIVER_PATCHLEVEL,
1814 : };
1815 :
1816 : #ifdef __linux__
1817 :
1818 : static struct pci_driver i915_pci_driver = {
1819 : .name = DRIVER_NAME,
1820 : .id_table = pciidlist,
1821 : .probe = i915_pci_probe,
1822 : .remove = i915_pci_remove,
1823 : .driver.pm = &i915_pm_ops,
1824 : };
1825 :
1826 : static int __init i915_init(void)
1827 : {
1828 : driver.num_ioctls = i915_max_ioctl;
1829 :
1830 : /*
1831 : * Enable KMS by default, unless explicitly overridden by
1832 : * either the i915.modeset parameter or by the
1833 : * vga_text_mode_force boot option.
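* As the checks below implement: -1 leaves the decision to this
* logic, 0 forces KMS off and 1 forces it on.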
1834 : */
1835 :
1836 : if (i915.modeset == 0)
1837 : driver.driver_features &= ~DRIVER_MODESET;
1838 :
1839 : #ifdef CONFIG_VGA_CONSOLE
1840 : if (vgacon_text_force() && i915.modeset == -1)
1841 : driver.driver_features &= ~DRIVER_MODESET;
1842 : #endif
1843 :
1844 : if (!(driver.driver_features & DRIVER_MODESET)) {
1845 : /* Silently fail loading to not upset userspace. */
1846 : DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1847 : return 0;
1848 : }
1849 :
1850 : if (i915.nuclear_pageflip)
1851 : driver.driver_features |= DRIVER_ATOMIC;
1852 :
1853 : return drm_pci_init(&driver, &i915_pci_driver);
1854 : }
1855 :
1856 : static void __exit i915_exit(void)
1857 : {
1858 : if (!(driver.driver_features & DRIVER_MODESET))
1859 : return; /* Never loaded a driver. */
1860 :
1861 : drm_pci_exit(&driver, &i915_pci_driver);
1862 : }
1863 :
1864 : module_init(i915_init);
1865 : module_exit(i915_exit);
1866 :
1867 : #endif
1868 :
1869 : MODULE_AUTHOR("Tungsten Graphics, Inc.");
1870 : MODULE_AUTHOR("Intel Corporation");
1871 :
1872 : MODULE_DESCRIPTION(DRIVER_DESC);
1873 : MODULE_LICENSE("GPL and additional rights");
1874 :
1875 : #ifdef __OpenBSD__
1876 :
1877 : #ifdef __amd64__
1878 : #include "efifb.h"
1879 : #endif
1880 :
1881 : #if NEFIFB > 0
1882 : #include <machine/efifbvar.h>
1883 : #endif
1884 :
1885 : #include "intagp.h"
1886 :
1887 : #if NINTAGP > 0
1888 : int intagpsubmatch(struct device *, void *, void *);
1889 : int intagp_print(void *, const char *);
1890 :
1891 : int
1892 0 : intagpsubmatch(struct device *parent, void *match, void *aux)
1893 : {
1894 : extern struct cfdriver intagp_cd;
1895 0 : struct cfdata *cf = match;
1896 :
1897 : /* only allow intagp to attach */
1898 0 : if (cf->cf_driver == &intagp_cd)
1899 0 : return ((*cf->cf_attach->ca_match)(parent, match, aux));
1900 0 : return (0);
1901 0 : }
1902 :
1903 : int
1904 0 : intagp_print(void *vaa, const char *pnp)
1905 : {
1906 0 : if (pnp)
1907 0 : printf("intagp at %s", pnp);
1908 0 : return (UNCONF);
1909 : }
1910 : #endif
1911 :
1912 : int inteldrm_wsioctl(void *, u_long, caddr_t, int, struct proc *);
1913 : paddr_t inteldrm_wsmmap(void *, off_t, int);
1914 : int inteldrm_alloc_screen(void *, const struct wsscreen_descr *,
1915 : void **, int *, int *, long *);
1916 : void inteldrm_free_screen(void *, void *);
1917 : int inteldrm_show_screen(void *, void *, int,
1918 : void (*)(void *, int, int), void *);
1919 : void inteldrm_doswitch(void *);
1920 : void inteldrm_enter_ddb(void *, void *);
1921 : int inteldrm_load_font(void *, void *, struct wsdisplay_font *);
1922 : int inteldrm_list_font(void *, struct wsdisplay_font *);
1923 : int inteldrm_getchar(void *, int, int, struct wsdisplay_charcell *);
1924 : void inteldrm_burner(void *, u_int, u_int);
1925 : void inteldrm_burner_cb(void *);
1926 : void inteldrm_scrollback(void *, void *, int lines);
1927 :
1928 : struct wsscreen_descr inteldrm_stdscreen = {
1929 : "std",
1930 : 0, 0,
1931 : 0,
1932 : 0, 0,
1933 : WSSCREEN_UNDERLINE | WSSCREEN_HILIT |
1934 : WSSCREEN_REVERSE | WSSCREEN_WSCOLORS
1935 : };
1936 :
1937 : const struct wsscreen_descr *inteldrm_scrlist[] = {
1938 : &inteldrm_stdscreen,
1939 : };
1940 :
1941 : struct wsscreen_list inteldrm_screenlist = {
1942 : nitems(inteldrm_scrlist), inteldrm_scrlist
1943 : };
1944 :
1945 : struct wsdisplay_accessops inteldrm_accessops = {
1946 : .ioctl = inteldrm_wsioctl,
1947 : .mmap = inteldrm_wsmmap,
1948 : .alloc_screen = inteldrm_alloc_screen,
1949 : .free_screen = inteldrm_free_screen,
1950 : .show_screen = inteldrm_show_screen,
1951 : .enter_ddb = inteldrm_enter_ddb,
1952 : .getchar = inteldrm_getchar,
1953 : .load_font = inteldrm_load_font,
1954 : .list_font = inteldrm_list_font,
1955 : .scrollback = inteldrm_scrollback,
1956 : .burn_screen = inteldrm_burner
1957 : };
1958 :
1959 : extern int (*ws_get_param)(struct wsdisplay_param *);
1960 : extern int (*ws_set_param)(struct wsdisplay_param *);
1961 :
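/*
 * wsdisplay(4) ioctl handler: report the framebuffer type and
 * geometry, and route brightness queries and updates to the
 * ws_get_param/ws_set_param hooks or, failing that, to the DRM
 * backlight device.
 */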
1962 : int
1963 0 : inteldrm_wsioctl(void *v, u_long cmd, caddr_t data, int flag, struct proc *p)
1964 : {
1965 0 : struct inteldrm_softc *dev_priv = v;
1966 0 : struct backlight_device *bd = dev_priv->backlight;
1967 0 : struct rasops_info *ri = &dev_priv->ro;
1968 : struct wsdisplay_fbinfo *wdf;
1969 0 : struct wsdisplay_param *dp = (struct wsdisplay_param *)data;
1970 :
1971 0 : switch (cmd) {
1972 : case WSDISPLAYIO_GTYPE:
1973 0 : *(int *)data = WSDISPLAY_TYPE_INTELDRM;
1974 0 : return 0;
1975 : case WSDISPLAYIO_GINFO:
1976 0 : wdf = (struct wsdisplay_fbinfo *)data;
1977 0 : wdf->width = ri->ri_width;
1978 0 : wdf->height = ri->ri_height;
1979 0 : wdf->depth = ri->ri_depth;
1980 0 : wdf->cmsize = 0;
1981 0 : return 0;
1982 : case WSDISPLAYIO_GETPARAM:
1983 0 : if (ws_get_param && ws_get_param(dp) == 0)
1984 0 : return 0;
1985 :
1986 0 : if (bd == NULL)
1987 0 : return -1;
1988 :
1989 0 : switch (dp->param) {
1990 : case WSDISPLAYIO_PARAM_BRIGHTNESS:
1991 0 : dp->min = 0;
1992 0 : dp->max = bd->props.max_brightness;
1993 0 : dp->curval = bd->ops->get_brightness(bd);
1994 0 : return (dp->max > dp->min) ? 0 : -1;
1995 : }
1996 : break;
1997 : case WSDISPLAYIO_SETPARAM:
1998 0 : if (ws_set_param && ws_set_param(dp) == 0)
1999 0 : return 0;
2000 :
2001 0 : if (bd == NULL || dp->curval > bd->props.max_brightness)
2002 0 : return -1;
2003 :
2004 0 : switch (dp->param) {
2005 : case WSDISPLAYIO_PARAM_BRIGHTNESS:
2006 0 : bd->props.brightness = dp->curval;
2007 0 : backlight_update_status(bd);
2008 0 : return 0;
2009 : }
2010 : break;
2011 : }
2012 :
2013 0 : return (-1);
2014 0 : }
2015 :
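/* Direct framebuffer mmap through wsdisplay(4) is not supported. */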
2016 : paddr_t
2017 0 : inteldrm_wsmmap(void *v, off_t off, int prot)
2018 : {
2019 0 : return (-1);
2020 : }
2021 :
2022 : int
2023 0 : inteldrm_alloc_screen(void *v, const struct wsscreen_descr *type,
2024 : void **cookiep, int *curxp, int *curyp, long *attrp)
2025 : {
2026 0 : struct inteldrm_softc *dev_priv = v;
2027 0 : struct rasops_info *ri = &dev_priv->ro;
2028 :
2029 0 : return rasops_alloc_screen(ri, cookiep, curxp, curyp, attrp);
2030 : }
2031 :
2032 : void
2033 0 : inteldrm_free_screen(void *v, void *cookie)
2034 : {
2035 0 : struct inteldrm_softc *dev_priv = v;
2036 0 : struct rasops_info *ri = &dev_priv->ro;
2037 :
2038 0 : return rasops_free_screen(ri, cookie);
2039 0 : }
2040 :
2041 : int
2042 0 : inteldrm_show_screen(void *v, void *cookie, int waitok,
2043 : void (*cb)(void *, int, int), void *cbarg)
2044 : {
2045 0 : struct inteldrm_softc *dev_priv = v;
2046 0 : struct rasops_info *ri = &dev_priv->ro;
2047 :
2048 0 : if (cookie == ri->ri_active)
2049 0 : return (0);
2050 :
2051 0 : dev_priv->switchcb = cb;
2052 0 : dev_priv->switchcbarg = cbarg;
2053 0 : dev_priv->switchcookie = cookie;
2054 0 : if (cb) {
2055 0 : task_add(systq, &dev_priv->switchtask);
2056 0 : return (EAGAIN);
2057 : }
2058 :
2059 0 : inteldrm_doswitch(v);
2060 :
2061 0 : return (0);
2062 0 : }
2063 :
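/*
 * Deferred screen switch, run from the system task queue: display the
 * pending rasops screen, restore the fbdev mode, then invoke the
 * completion callback handed to inteldrm_show_screen().
 */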
2064 : void
2065 0 : inteldrm_doswitch(void *v)
2066 : {
2067 0 : struct inteldrm_softc *dev_priv = v;
2068 0 : struct rasops_info *ri = &dev_priv->ro;
2069 0 : struct drm_device *dev = dev_priv->dev;
2070 :
2071 0 : rasops_show_screen(ri, dev_priv->switchcookie, 0, NULL, NULL);
2072 0 : intel_fbdev_restore_mode(dev);
2073 :
2074 0 : if (dev_priv->switchcb)
2075 0 : (*dev_priv->switchcb)(dev_priv->switchcbarg, 0, 0);
2076 0 : }
2077 :
2078 : void
2079 0 : inteldrm_enter_ddb(void *v, void *cookie)
2080 : {
2081 0 : struct inteldrm_softc *dev_priv = v;
2082 0 : struct rasops_info *ri = &dev_priv->ro;
2083 0 : struct drm_fb_helper *helper = &dev_priv->fbdev->helper;
2084 :
2085 0 : if (cookie == ri->ri_active)
2086 0 : return;
2087 :
2088 0 : rasops_show_screen(ri, cookie, 0, NULL, NULL);
2089 0 : drm_fb_helper_debug_enter(helper->fbdev);
2090 0 : }
2091 :
2092 : int
2093 0 : inteldrm_getchar(void *v, int row, int col, struct wsdisplay_charcell *cell)
2094 : {
2095 0 : struct inteldrm_softc *dev_priv = v;
2096 0 : struct rasops_info *ri = &dev_priv->ro;
2097 :
2098 0 : return rasops_getchar(ri, row, col, cell);
2099 : }
2100 :
2101 : int
2102 0 : inteldrm_load_font(void *v, void *cookie, struct wsdisplay_font *font)
2103 : {
2104 0 : struct inteldrm_softc *dev_priv = v;
2105 0 : struct rasops_info *ri = &dev_priv->ro;
2106 :
2107 0 : return rasops_load_font(ri, cookie, font);
2108 : }
2109 :
2110 : int
2111 0 : inteldrm_list_font(void *v, struct wsdisplay_font *font)
2112 : {
2113 0 : struct inteldrm_softc *dev_priv = v;
2114 0 : struct rasops_info *ri = &dev_priv->ro;
2115 :
2116 0 : return rasops_list_font(ri, font);
2117 : }
2118 :
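/*
 * Screen burner (blanker) entry point: translate the wsdisplay
 * request into an FB_BLANK state and schedule the DPMS change.
 */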
2119 : void
2120 0 : inteldrm_burner(void *v, u_int on, u_int flags)
2121 : {
2122 0 : struct inteldrm_softc *dev_priv = v;
2123 :
2124 0 : task_del(systq, &dev_priv->burner_task);
2125 :
2126 0 : if (on)
2127 0 : dev_priv->burner_fblank = FB_BLANK_UNBLANK;
2128 : else {
2129 0 : if (flags & WSDISPLAY_BURN_VBLANK)
2130 0 : dev_priv->burner_fblank = FB_BLANK_VSYNC_SUSPEND;
2131 : else
2132 0 : dev_priv->burner_fblank = FB_BLANK_NORMAL;
2133 : }
2134 :
2135 : /*
2136 : * Setting the DPMS mode may sleep while waiting for the display
2137 : * to come back on, so hand things off to a taskq.
2138 : */
2139 0 : task_add(systq, &dev_priv->burner_task);
2140 0 : }
2141 :
2142 : void
2143 0 : inteldrm_burner_cb(void *arg1)
2144 : {
2145 0 : struct inteldrm_softc *dev_priv = arg1;
2146 0 : struct drm_fb_helper *helper = &dev_priv->fbdev->helper;
2147 :
2148 0 : drm_fb_helper_blank(dev_priv->burner_fblank, helper->fbdev);
2149 0 : }
2150 :
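/*
 * Glue between the Linux backlight device abstraction and the
 * wsdisplay brightness parameters, used when a firmware method
 * (e.g. ACPI) drives the backlight.
 */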
2151 : int
2152 0 : inteldrm_backlight_update_status(struct backlight_device *bd)
2153 : {
2154 0 : struct wsdisplay_param dp;
2155 :
2156 0 : dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
2157 0 : dp.curval = bd->props.brightness;
2158 0 : ws_set_param(&dp);
2159 0 : return 0;
2160 0 : }
2161 :
2162 : int
2163 0 : inteldrm_backlight_get_brightness(struct backlight_device *bd)
2164 : {
2165 0 : struct wsdisplay_param dp;
2166 :
2167 0 : dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
2168 0 : ws_get_param(&dp);
2169 0 : return dp.curval;
2170 0 : }
2171 :
2172 : const struct backlight_ops inteldrm_backlight_ops = {
2173 : .update_status = inteldrm_backlight_update_status,
2174 : .get_brightness = inteldrm_backlight_get_brightness
2175 : };
2176 :
2177 : void
2178 0 : inteldrm_scrollback(void *v, void *cookie, int lines)
2179 : {
2180 0 : struct inteldrm_softc *dev_priv = v;
2181 0 : struct rasops_info *ri = &dev_priv->ro;
2182 :
2183 0 : rasops_scrollback(ri, cookie, lines);
2184 0 : }
2185 :
2186 : int inteldrm_match(struct device *, void *, void *);
2187 : void inteldrm_attach(struct device *, struct device *, void *);
2188 : int inteldrm_detach(struct device *, int);
2189 : int inteldrm_activate(struct device *, int);
2190 :
2191 : struct cfattach inteldrm_ca = {
2192 : sizeof(struct inteldrm_softc), inteldrm_match, inteldrm_attach,
2193 : inteldrm_detach, inteldrm_activate
2194 : };
2195 :
2196 : struct cfdriver inteldrm_cd = {
2197 : 0, "inteldrm", DV_DULL
2198 : };
2199 :
2200 : void inteldrm_init_backlight(struct inteldrm_softc *);
2201 : int inteldrm_intr(void *);
2202 :
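/*
 * Match any device in pciidlist, but only at PCI function 0; the
 * return value of 20 outbids the generic VGA driver so inteldrm
 * attaches to the console device.
 */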
2203 : int
2204 0 : inteldrm_match(struct device *parent, void *match, void *aux)
2205 : {
2206 0 : struct pci_attach_args *pa = aux;
2207 :
2208 0 : if (drm_pciprobe(aux, pciidlist) && pa->pa_function == 0)
2209 0 : return 20;
2210 0 : return 0;
2211 0 : }
2212 :
2213 : void
2214 0 : inteldrm_attach(struct device *parent, struct device *self, void *aux)
2215 : {
2216 0 : struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
2217 : struct drm_device *dev;
2218 0 : struct pci_attach_args *pa = aux;
2219 : const struct drm_pcidev *id;
2220 : struct intel_device_info *info, *device_info;
2221 0 : struct rasops_info *ri = &dev_priv->ro;
2222 0 : struct wsemuldisplaydev_attach_args aa;
2223 : extern int vga_console_attached;
2224 : int mmio_bar, mmio_size, mmio_type;
2225 : int console = 0;
2226 :
2227 0 : dev_priv->pc = pa->pa_pc;
2228 0 : dev_priv->tag = pa->pa_tag;
2229 0 : dev_priv->dmat = pa->pa_dmat;
2230 0 : dev_priv->bst = pa->pa_memt;
2231 0 : dev_priv->memex = pa->pa_memex;
2232 0 : dev_priv->regs = &dev_priv->bar;
2233 :
2234 0 : if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
2235 0 : PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
2236 0 : (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
2237 0 : & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
2238 0 : == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
2239 0 : vga_console_attached = 1;
2240 : console = 1;
2241 0 : }
2242 :
2243 : #if NEFIFB > 0
2244 0 : if (efifb_is_console(pa))
2245 0 : console = 1;
2246 : #endif
2247 :
2248 0 : printf("\n");
2249 :
2250 0 : driver.num_ioctls = i915_max_ioctl;
2251 :
2252 0 : if (i915.nuclear_pageflip)
2253 0 : driver.driver_features |= DRIVER_ATOMIC;
2254 :
2255 0 : dev_priv->dev = dev = (struct drm_device *)
2256 0 : drm_attach_pci(&driver, pa, 0, console, self);
2257 :
2258 0 : id = drm_find_description(PCI_VENDOR(pa->pa_id),
2259 0 : PCI_PRODUCT(pa->pa_id), pciidlist);
2260 0 : info = (struct intel_device_info *)id->driver_data;
2261 :
2262 : /* Setup the write-once "constant" device info */
2263 0 : device_info = (struct intel_device_info *)&dev_priv->info;
2264 0 : memcpy(device_info, info, sizeof(dev_priv->info));
2265 0 : device_info->device_id = dev->pdev->device;
2266 :
2267 0 : mmio_bar = IS_GEN2(dev) ? 0x14 : 0x10;
2268 : /* Before gen4, the registers and the GTT are behind different BARs.
2269 : * However, from gen4 onwards, the registers and the GTT are shared
2270 : * in the same BAR, so we want to restrict this ioremap from
2271 : * clobbering the GTT, which we want to map with ioremap_wc instead.
2272 : * Fortunately, the register BAR remains the same size for all the earlier
2273 : * generations up to Ironlake.
2274 : */
2275 0 : if (info->gen < 5)
2276 0 : mmio_size = 512*1024;
2277 : else
2278 : mmio_size = 2*1024*1024;
2279 :
2280 0 : mmio_type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, mmio_bar);
2281 0 : if (pci_mapreg_map(pa, mmio_bar, mmio_type, 0, &dev_priv->regs->bst,
2282 0 : &dev_priv->regs->bsh, &dev_priv->regs->base,
2283 0 : &dev_priv->regs->size, mmio_size)) {
2284 0 : printf("%s: can't map registers\n",
2285 0 : dev_priv->sc_dev.dv_xname);
2286 0 : return;
2287 : }
2288 :
2289 : #if NINTAGP > 0
2290 0 : if (info->gen <= 5) {
2291 0 : config_found_sm(self, aux, intagp_print, intagpsubmatch);
2292 0 : dev->agp = drm_agp_init();
2293 0 : if (dev->agp) {
2294 0 : if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
2295 0 : dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
2296 0 : dev->agp->mtrr = 1;
2297 : }
2298 : }
2299 : #endif
2300 :
2301 0 : if (IS_I945G(dev) || IS_I945GM(dev))
2302 0 : pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2303 :
2304 0 : if (pci_intr_map_msi(pa, &dev_priv->ih) != 0 &&
2305 0 : pci_intr_map(pa, &dev_priv->ih) != 0) {
2306 0 : printf("%s: couldn't map interrupt\n",
2307 0 : dev_priv->sc_dev.dv_xname);
2308 0 : return;
2309 : }
2310 :
2311 0 : printf("%s: %s\n", dev_priv->sc_dev.dv_xname,
2312 0 : pci_intr_string(dev_priv->pc, dev_priv->ih));
2313 :
2314 0 : dev_priv->irqh = pci_intr_establish(dev_priv->pc, dev_priv->ih,
2315 0 : IPL_TTY, inteldrm_intr, dev_priv, dev_priv->sc_dev.dv_xname);
2316 0 : if (dev_priv->irqh == NULL) {
2317 0 : printf("%s: couldn't establish interrupt\n",
2318 : dev_priv->sc_dev.dv_xname);
2319 0 : return;
2320 : }
2321 0 : dev->pdev->irq = -1;
2322 :
2323 0 : if (i915_driver_load(dev, id->driver_data))
2324 0 : return;
2325 :
2326 : #if NEFIFB > 0
2327 0 : if (efifb_is_console(pa))
2328 0 : efifb_cndetach();
2329 : #endif
2330 :
2331 0 : printf("%s: %dx%d, %dbpp\n", dev_priv->sc_dev.dv_xname,
2332 0 : ri->ri_width, ri->ri_height, ri->ri_depth);
2333 :
2334 0 : intel_fbdev_restore_mode(dev);
2335 :
2336 0 : inteldrm_init_backlight(dev_priv);
2337 :
2338 0 : ri->ri_flg = RI_CENTER | RI_WRONLY | RI_VCONS | RI_CLEAR;
2339 0 : if (ri->ri_width < ri->ri_height) {
2340 : pcireg_t subsys;
2341 :
2342 : #define PCI_PRODUCT_ASUSTEK_T100HA 0x1bdd
2343 :
2344 : /*
2345 : * Asus T100HA needs to be rotated counter-clockwise.
2346 : * Everybody else seems to mount their panels the
2347 : * other way around.
2348 : */
2349 0 : subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
2350 : PCI_SUBSYS_ID_REG);
2351 0 : if (PCI_VENDOR(subsys) == PCI_VENDOR_ASUSTEK &&
2352 0 : PCI_PRODUCT(subsys) == PCI_PRODUCT_ASUSTEK_T100HA)
2353 0 : ri->ri_flg |= RI_ROTATE_CCW;
2354 : else
2355 0 : ri->ri_flg |= RI_ROTATE_CW;
2356 0 : }
2357 0 : ri->ri_hw = dev_priv;
2358 0 : rasops_init(ri, 160, 160);
2359 :
2360 0 : task_set(&dev_priv->switchtask, inteldrm_doswitch, dev_priv);
2361 0 : task_set(&dev_priv->burner_task, inteldrm_burner_cb, dev_priv);
2362 :
2363 0 : inteldrm_stdscreen.capabilities = ri->ri_caps;
2364 0 : inteldrm_stdscreen.nrows = ri->ri_rows;
2365 0 : inteldrm_stdscreen.ncols = ri->ri_cols;
2366 0 : inteldrm_stdscreen.textops = &ri->ri_ops;
2367 0 : inteldrm_stdscreen.fontwidth = ri->ri_font->fontwidth;
2368 0 : inteldrm_stdscreen.fontheight = ri->ri_font->fontheight;
2369 :
2370 0 : aa.console = console;
2371 0 : aa.scrdata = &inteldrm_screenlist;
2372 0 : aa.accessops = &inteldrm_accessops;
2373 0 : aa.accesscookie = dev_priv;
2374 0 : aa.defaultscreens = 0;
2375 :
2376 0 : if (console) {
2377 0 : long defattr;
2378 :
2379 : /*
2380 : * Clear the entire screen if we're doing rotation to
2381 : * make sure no unrotated content survives.
2382 : */
2383 0 : if (ri->ri_flg & (RI_ROTATE_CW | RI_ROTATE_CCW))
2384 0 : memset(ri->ri_bits, 0, ri->ri_height * ri->ri_stride);
2385 :
2386 0 : ri->ri_ops.alloc_attr(ri->ri_active, 0, 0, 0, &defattr);
2387 0 : wsdisplay_cnattach(&inteldrm_stdscreen, ri->ri_active,
2388 0 : 0, 0, defattr);
2389 0 : }
2390 :
2391 0 : config_found_sm(self, &aa, wsemuldisplaydevprint,
2392 : wsemuldisplaydevsubmatch);
2393 0 : return;
2394 0 : }
2395 :
2396 : int
2397 0 : inteldrm_detach(struct device *self, int flags)
2398 : {
2399 0 : return 0;
2400 : }
2401 :
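/*
 * Autoconf power hook: map DVACT_QUIESCE/DVACT_WAKEUP onto the shared
 * i915_drm_suspend()/i915_drm_resume() paths and suspend/resume the
 * AGP chipset alongside the device.
 */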
2402 : int
2403 0 : inteldrm_activate(struct device *self, int act)
2404 : {
2405 0 : struct inteldrm_softc *dev_priv = (struct inteldrm_softc *)self;
2406 0 : struct drm_device *dev = dev_priv->dev;
2407 : int rv = 0;
2408 :
2409 0 : if (dev == NULL)
2410 0 : return (0);
2411 :
2412 0 : switch (act) {
2413 : case DVACT_QUIESCE:
2414 0 : rv = config_suspend((struct device *)dev, act);
2415 0 : i915_drm_suspend(dev);
2416 0 : i915_drm_suspend_late(dev, false);
2417 0 : break;
2418 : case DVACT_SUSPEND:
2419 0 : if (dev->agp)
2420 0 : config_suspend(dev->agp->agpdev->sc_chipc, act);
2421 : break;
2422 : case DVACT_RESUME:
2423 0 : if (dev->agp)
2424 0 : config_suspend(dev->agp->agpdev->sc_chipc, act);
2425 : break;
2426 : case DVACT_WAKEUP:
2427 0 : i915_drm_resume_early(dev);
2428 0 : i915_drm_resume(dev);
2429 0 : intel_fbdev_restore_mode(dev);
2430 0 : rv = config_suspend((struct device *)dev, act);
2431 0 : break;
2432 : }
2433 :
2434 0 : return (rv);
2435 0 : }
2436 :
2437 : void
2438 0 : inteldrm_native_backlight(struct inteldrm_softc *dev_priv)
2439 : {
2440 0 : struct drm_device *dev = dev_priv->dev;
2441 : struct intel_connector *intel_connector;
2442 :
2443 0 : list_for_each_entry(intel_connector,
2444 : &dev->mode_config.connector_list, base.head) {
2445 : struct drm_connector *connector = &intel_connector->base;
2446 0 : struct intel_panel *panel = &intel_connector->panel;
2447 0 : struct backlight_device *bd = panel->backlight.device;
2448 :
2449 0 : if (!panel->backlight.present)
2450 0 : continue;
2451 :
2452 0 : connector->backlight_device = bd;
2453 0 : connector->backlight_property = drm_property_create_range(dev,
2454 0 : 0, "Backlight", 0, bd->props.max_brightness);
2455 0 : drm_object_attach_property(&connector->base,
2456 0 : connector->backlight_property, bd->props.brightness);
2457 :
2458 : /*
2459 : * Use backlight from the first connector that has one
2460 : * for wscons(4).
2461 : */
2462 0 : if (dev_priv->backlight == NULL)
2463 0 : dev_priv->backlight = bd;
2464 0 : }
2465 0 : }
2466 :
2467 : void
2468 0 : inteldrm_firmware_backlight(struct inteldrm_softc *dev_priv,
2469 : struct wsdisplay_param *dp)
2470 : {
2471 0 : struct drm_device *dev = dev_priv->dev;
2472 : struct intel_connector *intel_connector;
2473 0 : struct backlight_properties props;
2474 : struct backlight_device *bd;
2475 :
2476 0 : memset(&props, 0, sizeof(props));
2477 0 : props.type = BACKLIGHT_FIRMWARE;
2478 0 : props.brightness = dp->curval;
2479 0 : bd = backlight_device_register(dev->device.dv_xname, NULL, NULL,
2480 : &inteldrm_backlight_ops, &props);
2481 :
2482 0 : list_for_each_entry(intel_connector,
2483 : &dev->mode_config.connector_list, base.head) {
2484 : struct drm_connector *connector = &intel_connector->base;
2485 :
2486 0 : if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
2487 0 : connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
2488 0 : connector->connector_type != DRM_MODE_CONNECTOR_DSI)
2489 0 : continue;
2490 :
2491 0 : connector->backlight_device = bd;
2492 0 : connector->backlight_property = drm_property_create_range(dev,
2493 0 : 0, "Backlight", dp->min, dp->max);
2494 0 : drm_object_attach_property(&connector->base,
2495 0 : connector->backlight_property, dp->curval);
2496 0 : }
2497 0 : }
2498 :
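/*
 * Pick a backlight for wscons(4): prefer a firmware (e.g. ACPI)
 * brightness method if one has registered ws_get_param, otherwise
 * fall back to the panel's native backlight.
 */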
2499 : void
2500 0 : inteldrm_init_backlight(struct inteldrm_softc *dev_priv)
2501 : {
2502 0 : struct drm_device *dev = dev_priv->dev;
2503 0 : struct wsdisplay_param dp;
2504 :
2505 0 : drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2506 :
2507 0 : dp.param = WSDISPLAYIO_PARAM_BRIGHTNESS;
2508 0 : if (ws_get_param && ws_get_param(&dp) == 0)
2509 0 : inteldrm_firmware_backlight(dev_priv, &dp);
2510 : else
2511 0 : inteldrm_native_backlight(dev_priv);
2512 :
2513 0 : drm_modeset_unlock(&dev->mode_config.connection_mutex);
2514 0 : }
2515 :
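/*
 * Interrupt wrapper: hand the interrupt off to the chipset-specific
 * handler installed by the DRM driver.
 */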
2516 : int
2517 0 : inteldrm_intr(void *arg)
2518 : {
2519 0 : struct inteldrm_softc *dev_priv = arg;
2520 0 : struct drm_device *dev = dev_priv->dev;
2521 :
2522 0 : return dev->driver->irq_handler(0, dev);
2523 : }
2524 :
2525 : #endif