Line data Source code
1 : /*
2 : * Copyright © 2006-2007 Intel Corporation
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice (including the next
12 : * paragraph) shall be included in all copies or substantial portions of the
13 : * Software.
14 : *
15 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 : * DEALINGS IN THE SOFTWARE.
22 : *
23 : * Authors:
24 : * Eric Anholt <eric@anholt.net>
25 : */
26 :
27 : #ifdef __linux__
28 : #include <linux/dmi.h>
29 : #include <linux/module.h>
30 : #include <linux/input.h>
31 : #include <linux/i2c.h>
32 : #include <linux/kernel.h>
33 : #include <linux/slab.h>
34 : #include <linux/vgaarb.h>
35 : #endif
36 : #include <dev/pci/drm/drmP.h>
37 : #include <dev/pci/drm/drm_edid.h>
38 : #include "intel_drv.h"
39 : #include <dev/pci/drm/i915_drm.h>
40 : #include "i915_drv.h"
41 : #include "i915_trace.h"
42 : #include <dev/pci/drm/drm_atomic.h>
43 : #include <dev/pci/drm/drm_atomic_helper.h>
44 : #include <dev/pci/drm/drm_dp_helper.h>
45 : #include <dev/pci/drm/drm_crtc_helper.h>
46 : #include <dev/pci/drm/drm_plane_helper.h>
47 : #include <dev/pci/drm/drm_rect.h>
48 : #ifdef __linux__
49 : #include <linux/dma_remapping.h>
50 : #endif
51 :
/* Primary plane formats for gen <= 3 (standard DRM fourcc codes) */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,		/* 8bpp palette/indexed */
	DRM_FORMAT_RGB565,	/* 16bpp */
	DRM_FORMAT_XRGB1555,	/* 15bpp, top bit unused */
	DRM_FORMAT_XRGB8888,	/* 32bpp, no alpha */
};
59 :
/* Primary plane formats for gen >= 4; adds BGR ordering and 10bpc variants */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
69 :
/* Skylake+ primary plane formats: adds per-pixel alpha and packed YUV */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,	/* alpha-capable variants */
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,	/* packed 4:2:2 YUV orderings */
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};
84 :
/* Cursor formats: ARGB8888 is the only format this driver exposes for cursors */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
89 :
90 : static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
91 :
92 : static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
93 : struct intel_crtc_state *pipe_config);
94 : static void ironlake_pch_clock_get(struct intel_crtc *crtc,
95 : struct intel_crtc_state *pipe_config);
96 :
97 : static int intel_framebuffer_init(struct drm_device *dev,
98 : struct intel_framebuffer *ifb,
99 : struct drm_mode_fb_cmd2 *mode_cmd,
100 : struct drm_i915_gem_object *obj);
101 : static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
102 : static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
103 : static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
104 : struct intel_link_m_n *m_n,
105 : struct intel_link_m_n *m2_n2);
106 : static void ironlake_set_pipeconf(struct drm_crtc *crtc);
107 : static void haswell_set_pipeconf(struct drm_crtc *crtc);
108 : static void intel_set_pipe_csc(struct drm_crtc *crtc);
109 : static void vlv_prepare_pll(struct intel_crtc *crtc,
110 : const struct intel_crtc_state *pipe_config);
111 : static void chv_prepare_pll(struct intel_crtc *crtc,
112 : const struct intel_crtc_state *pipe_config);
113 : static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
114 : static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
115 : static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
116 : struct intel_crtc_state *crtc_state);
117 : static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
118 : int num_connectors);
119 : static void skylake_pfit_enable(struct intel_crtc *crtc);
120 : static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
121 : static void ironlake_pfit_enable(struct intel_crtc *crtc);
122 : static void intel_modeset_setup_hw_state(struct drm_device *dev);
123 : static void intel_pre_disable_primary(struct drm_crtc *crtc);
124 :
/* Inclusive [min, max] range for one PLL divider/clock parameter. */
typedef struct {
	int min, max;
} intel_range_t;
128 :
/* Post divider 2 selection: below dot_limit use p2_slow, above use p2_fast. */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;
133 :
/* Per-platform/per-output PLL parameter limits consumed by the
 * *_find_best_dpll() search helpers below. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
};
139 :
140 : /* returns HPLL frequency in kHz */
141 0 : static int valleyview_get_vco(struct drm_i915_private *dev_priv)
142 : {
143 0 : int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
144 :
145 : /* Obtain SKU information */
146 0 : mutex_lock(&dev_priv->sb_lock);
147 0 : hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
148 : CCK_FUSE_HPLL_FREQ_MASK;
149 0 : mutex_unlock(&dev_priv->sb_lock);
150 :
151 0 : return vco_freq[hpll_freq] * 1000;
152 0 : }
153 :
154 0 : static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
155 : const char *name, u32 reg)
156 : {
157 : u32 val;
158 : int divider;
159 :
160 0 : if (dev_priv->hpll_freq == 0)
161 0 : dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
162 :
163 0 : mutex_lock(&dev_priv->sb_lock);
164 0 : val = vlv_cck_read(dev_priv, reg);
165 0 : mutex_unlock(&dev_priv->sb_lock);
166 :
167 0 : divider = val & CCK_FREQUENCY_VALUES;
168 :
169 0 : WARN((val & CCK_FREQUENCY_STATUS) !=
170 : (divider << CCK_FREQUENCY_STATUS_SHIFT),
171 : "%s change in progress\n", name);
172 :
173 0 : return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
174 : }
175 :
176 : int
177 0 : intel_pch_rawclk(struct drm_device *dev)
178 : {
179 0 : struct drm_i915_private *dev_priv = dev->dev_private;
180 :
181 0 : WARN_ON(!HAS_PCH_SPLIT(dev));
182 :
183 0 : return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
184 : }
185 :
/* hrawclock is 1/4 the FSB frequency */
/* Returns the hraw clock in MHz, derived from the CLKCFG FSB field. */
int intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to the slowest common value. */
		return 133;
	}
}
218 :
/* Cache the current CZ clock rate (kHz) in dev_priv->czclk_freq. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	/* The CZ clock only exists on Valleyview-based platforms. */
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
229 :
230 : static inline u32 /* units of 100MHz */
231 0 : intel_fdi_link_freq(struct drm_device *dev)
232 : {
233 0 : if (IS_GEN5(dev)) {
234 0 : struct drm_i915_private *dev_priv = dev->dev_private;
235 0 : return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
236 : } else
237 0 : return 27;
238 0 : }
239 :
/* gen2 PLL limits: CRT/DAC output */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 PLL limits: DVO output (only p2_fast differs from the DAC table) */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 PLL limits: LVDS panel */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
278 :
/* i9xx-class PLL limits: SDVO and other non-LVDS outputs */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx-class PLL limits: LVDS panel */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
304 :
305 :
/* G4x PLL limits: SDVO output */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x PLL limits: HDMI (also used for analog on G4x) */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x PLL limits: single-channel LVDS (dot_limit 0 => p2 always "fast") */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x PLL limits: dual-channel LVDS */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
361 :
/* Pineview PLL limits: SDVO output */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview PLL limits: LVDS panel (same m/n scheme as SDVO above) */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
389 :
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* ILK/SNB PLL limits: CRT/DAC and other non-LVDS outputs */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB PLL limits: single-channel LVDS, 120 MHz refclk */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB PLL limits: dual-channel LVDS, 120 MHz refclk */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
460 :
static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV, hence the << 22 scaling */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
504 :
/* Thin wrapper so call sites read naturally as "needs_modeset(state)". */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
510 :
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Scan every encoder currently attached to this crtc. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
525 :
/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		/* Only consider connectors that will be driven by this crtc. */
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	/* A crtc being configured should have at least one connector. */
	WARN_ON(num_connectors == 0);

	return false;
}
556 :
557 : static const intel_limit_t *
558 0 : intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
559 : {
560 0 : struct drm_device *dev = crtc_state->base.crtc->dev;
561 : const intel_limit_t *limit;
562 :
563 0 : if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
564 0 : if (intel_is_dual_link_lvds(dev)) {
565 0 : if (refclk == 100000)
566 0 : limit = &intel_limits_ironlake_dual_lvds_100m;
567 : else
568 : limit = &intel_limits_ironlake_dual_lvds;
569 : } else {
570 0 : if (refclk == 100000)
571 0 : limit = &intel_limits_ironlake_single_lvds_100m;
572 : else
573 : limit = &intel_limits_ironlake_single_lvds;
574 : }
575 : } else
576 : limit = &intel_limits_ironlake_dac;
577 :
578 0 : return limit;
579 : }
580 :
581 : static const intel_limit_t *
582 0 : intel_g4x_limit(struct intel_crtc_state *crtc_state)
583 : {
584 0 : struct drm_device *dev = crtc_state->base.crtc->dev;
585 : const intel_limit_t *limit;
586 :
587 0 : if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
588 0 : if (intel_is_dual_link_lvds(dev))
589 0 : limit = &intel_limits_g4x_dual_channel_lvds;
590 : else
591 : limit = &intel_limits_g4x_single_channel_lvds;
592 0 : } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
593 0 : intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
594 : limit = &intel_limits_g4x_hdmi;
595 0 : } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
596 : limit = &intel_limits_g4x_sdvo;
597 0 : } else /* The option is for other outputs */
598 : limit = &intel_limits_i9xx_sdvo;
599 :
600 0 : return limit;
601 : }
602 :
/*
 * Top-level PLL limit selector. NOTE: the platform checks are ordered from
 * most to least specific (e.g. BXT before the PCH/gen checks), so do not
 * reorder them.
 */
static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (IS_BROXTON(dev))
		limit = &intel_limits_bxt;
	else if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc_state, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc_state);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		/* gen2: distinguish LVDS, DVO and DAC outputs. */
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}
639 :
640 : /*
641 : * Platform specific helpers to calculate the port PLL loopback- (clock.m),
642 : * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
643 : * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
644 : * The helpers' return value is the rate of the clock that is fed to the
645 : * display engine's pipe which can be the above fast dot clock rate or a
646 : * divided-down version of it.
647 : */
648 : /* m1 is reserved as 0 in Pineview, n is a ring counter */
649 0 : static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
650 : {
651 0 : clock->m = clock->m2 + 2;
652 0 : clock->p = clock->p1 * clock->p2;
653 0 : if (WARN_ON(clock->n == 0 || clock->p == 0))
654 0 : return 0;
655 0 : clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
656 0 : clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
657 :
658 0 : return clock->dot;
659 0 : }
660 :
661 0 : static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
662 : {
663 0 : return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
664 : }
665 :
666 0 : static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
667 : {
668 0 : clock->m = i9xx_dpll_compute_m(clock);
669 0 : clock->p = clock->p1 * clock->p2;
670 0 : if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
671 0 : return 0;
672 0 : clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
673 0 : clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
674 :
675 0 : return clock->dot;
676 0 : }
677 :
678 0 : static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
679 : {
680 0 : clock->m = clock->m1 * clock->m2;
681 0 : clock->p = clock->p1 * clock->p2;
682 0 : if (WARN_ON(clock->n == 0 || clock->p == 0))
683 0 : return 0;
684 0 : clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
685 0 : clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
686 :
687 0 : return clock->dot / 5;
688 0 : }
689 :
690 0 : int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
691 : {
692 0 : clock->m = clock->m1 * clock->m2;
693 0 : clock->p = clock->p1 * clock->p2;
694 0 : if (WARN_ON(clock->n == 0 || clock->p == 0))
695 0 : return 0;
696 0 : clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
697 : clock->n << 22);
698 0 : clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
699 :
700 0 : return clock->dot / 5;
701 0 : }
702 :
/* NOTE: this macro expands to a 'return false' from the *calling* function. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
708 :
/* Validate a candidate divider set against the platform limit table.
 * Each INTELPllInvalid() below returns false from this function. */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* PNV/VLV/BXT have no separate m1 divider, so skip the ordering rule. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/BXT limit tables carry no combined m/p ranges. */
	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
743 :
744 : static int
745 0 : i9xx_select_p2_div(const intel_limit_t *limit,
746 : const struct intel_crtc_state *crtc_state,
747 : int target)
748 : {
749 0 : struct drm_device *dev = crtc_state->base.crtc->dev;
750 :
751 0 : if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
752 : /*
753 : * For LVDS just rely on its current settings for dual-channel.
754 : * We haven't figured out how to reliably set up different
755 : * single/dual channel state, if we even can.
756 : */
757 0 : if (intel_is_dual_link_lvds(dev))
758 0 : return limit->p2.p2_fast;
759 : else
760 0 : return limit->p2.p2_slow;
761 : } else {
762 0 : if (target < limit->p2.dot_limit)
763 0 : return limit->p2.p2_slow;
764 : else
765 0 : return limit->p2.p2_fast;
766 : }
767 0 : }
768 :
/*
 * Exhaustively search the divider space for the combination whose dot clock
 * is closest to @target. Writes the winner to @best_clock and returns true
 * iff some valid candidate beat the initial error bound (== target).
 * If @match_clock is given, only candidates with the same p are considered.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* Hardware requires m1 > m2; larger m2 can't be valid. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
815 :
/*
 * Pineview variant of i9xx_find_best_dpll(). Identical search, except it
 * uses pnv_calc_dpll_params() and omits the m1 > m2 constraint (Pineview
 * has a single combined m divider).
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
860 :
/*
 * G4x divider search: accepts the first-best candidate within ~0.6% of
 * @target, preferring small n and large m1/m2/p1 per hardware requirements.
 * Returns true iff a candidate within the error bound was found.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* A hit at this n makes larger
						 * n values uninteresting. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
908 :
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 *
 * Returns true when @calculated_clock should replace @best_clock; the
 * candidate's error (in ppm) is always written to *@error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	/* Guard against division by zero below. */
	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* Otherwise require a meaningfully (10 ppm) better error. */
	return *error_ppm + 10 < best_error_ppm;
}
948 :
/*
 * Brute-force search for the VLV DPLL divider set (n, m1, m2, p1, p2) whose
 * output is closest to @target.  On success *@best_clock holds the winning
 * dividers and true is returned.  @match_clock is unused here.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;	/* worst possible error: 100% */
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve m2 so dot ~= target for the chosen n/m1/p. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
1002 :
/*
 * CHV variant of the DPLL divider search: n and m1 are fixed by the
 * hardware docs, so only p1/p2 are scanned and m2 is solved directly
 * (in 22.2-style fixed point).  @match_clock is unused here.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2. If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* m2 carries 22 fractional bits on CHV. */
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* Skip candidates that would overflow clock.m2. */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
1060 :
1061 0 : bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1062 : intel_clock_t *best_clock)
1063 : {
1064 0 : int refclk = i9xx_get_refclk(crtc_state, 0);
1065 :
1066 0 : return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1067 : target_clock, refclk, NULL, best_clock);
1068 : }
1069 :
1070 0 : bool intel_crtc_active(struct drm_crtc *crtc)
1071 : {
1072 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1073 :
1074 : /* Be paranoid as we can arrive here with only partial
1075 : * state retrieved from the hardware during setup.
1076 : *
1077 : * We can ditch the adjusted_mode.crtc_clock check as soon
1078 : * as Haswell has gained clock readout/fastboot support.
1079 : *
1080 : * We can ditch the crtc->primary->fb check as soon as we can
1081 : * properly reconstruct framebuffers.
1082 : *
1083 : * FIXME: The intel_crtc->active here should be switched to
1084 : * crtc->state->active once we have proper CRTC states wired up
1085 : * for atomic.
1086 : */
1087 0 : return intel_crtc->active && crtc->primary->state->fb &&
1088 0 : intel_crtc->config->base.adjusted_mode.crtc_clock;
1089 : }
1090 :
1091 0 : enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1092 : enum pipe pipe)
1093 : {
1094 0 : struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1095 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1096 :
1097 0 : return intel_crtc->config->cpu_transcoder;
1098 : }
1099 :
/*
 * Report whether the display scanline counter for @pipe has stopped
 * advancing, by sampling PIPEDSL twice 5 ms apart: a running pipe will
 * (almost certainly) have moved to a different line in that window.
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* Gen2 has a narrower scanline field. */
	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	drm_msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
1118 :
1119 : /*
1120 : * intel_wait_for_pipe_off - wait for pipe to turn off
1121 : * @crtc: crtc whose pipe to wait for
1122 : *
1123 : * After disabling a pipe, we can't wait for vblank in the usual way,
1124 : * spinning on the vblank interrupt status bit, since we won't actually
1125 : * see an interrupt when the pipe is disabled.
1126 : *
1127 : * On Gen4 and above:
1128 : * wait for the pipe register state bit to turn off
1129 : *
1130 : * Otherwise:
1131 : * wait for the display line value to settle (it usually
1132 : * ends up stopping at the start of the next frame).
1133 : *
1134 : */
1135 0 : static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1136 : {
1137 0 : struct drm_device *dev = crtc->base.dev;
1138 0 : struct drm_i915_private *dev_priv = dev->dev_private;
1139 0 : enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1140 0 : enum pipe pipe = crtc->pipe;
1141 :
1142 0 : if (INTEL_INFO(dev)->gen >= 4) {
1143 0 : int reg = PIPECONF(cpu_transcoder);
1144 :
1145 : /* Wait for the Pipe State to go off */
1146 0 : if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1147 : 100))
1148 0 : WARN(1, "pipe_off wait timed out\n");
1149 0 : } else {
1150 : /* Wait for the display line to settle */
1151 0 : if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1152 0 : WARN(1, "pipe_off wait timed out\n");
1153 : }
1154 0 : }
1155 :
/* Map an enable flag to the "on"/"off" wording used in assert messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
1160 :
/* Only for pre-ILK configs */
/* Warn (via I915_STATE_WARN) if the DPLL for @pipe is not in @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1174 :
/* XXX: the dsi pll is shared between MIPI DSI ports */
/*
 * Warn if the (shared) DSI PLL enable state does not match @state.
 * Read via the CCK sideband, which requires holding sb_lock.
 */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1190 : #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1191 : #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1192 :
1193 : struct intel_shared_dpll *
1194 0 : intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1195 : {
1196 0 : struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1197 :
1198 0 : if (crtc->config->shared_dpll < 0)
1199 0 : return NULL;
1200 :
1201 0 : return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1202 0 : }
1203 :
/* For ILK+ */
/*
 * Warn if shared DPLL @pll is not in @state, as reported by the PLL's own
 * get_hw_state hook.  Tolerates (and warns about) a NULL @pll.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN (!pll,
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}
1221 :
/*
 * Warn if the FDI transmitter feeding @pipe is not in @state.  On DDI
 * platforms there is no dedicated FDI_TX register, so the transcoder's
 * DDI function-control enable bit stands in for it.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1241 : #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1242 : #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1243 :
/* Warn if the FDI receiver for @pipe is not in @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1256 : #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1257 : #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1258 :
/*
 * Warn if the FDI TX PLL for @pipe is disabled.  No-op on ILK (the PLL
 * is always on there) and on DDI platforms (the DDI port owns it).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1275 :
/* Warn if the FDI RX PLL for @pipe is not in @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
1288 :
/*
 * Warn if the panel-power-sequencer registers driving @pipe are write
 * locked while the panel is on.  First works out which pipe the panel is
 * actually attached to (platform dependent), then checks the PP control
 * register's power and unlock bits.  Not applicable on DDI platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* Panel off, or unlock magic written: registers are writable. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1330 :
/*
 * Warn if the hardware cursor on @pipe is not in @state.  845G/865G have
 * a single cursor register with a different enable encoding.
 */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	    "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	    pipe_name(pipe), state_string(state), state_string(cur_state));
}
1346 : #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1347 : #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1348 :
/*
 * Warn if @pipe is not in @state.  Pipes forced on by a quirk are always
 * expected on, and a pipe whose transcoder power domain is off is treated
 * as disabled without touching its registers.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1373 :
/* Warn if display plane @plane is not in @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}
1386 :
1387 : #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1388 : #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1389 :
/*
 * Warn if any primary plane is still enabled on @pipe.  Gen4+ has a fixed
 * plane/pipe mapping, so only that plane's register is checked; older
 * hardware can route either plane to either pipe, so all planes are
 * checked for a pipe-select match.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1415 :
/*
 * Warn if any sprite/overlay plane is still enabled on @pipe.  The sprite
 * register layout differs per generation: universal planes on gen9+,
 * per-sprite SPCNTR on VLV, a single SPRCTL on gen7/8 and DVSCNTR on
 * gen5/6; gen < 5 has no sprites to check.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1448 :
/*
 * Warn if vblank interrupts are still enabled on @crtc.
 * drm_crtc_vblank_get() returning 0 means the reference was taken (i.e.
 * vblanks are usable/enabled), so warn and drop the reference again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1454 :
/*
 * Warn if no PCH reference clock source (SSC, non-spread or superspread)
 * is enabled.  Only meaningful on IBX/CPT PCHs, which is also asserted.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1467 :
/* Warn if the PCH transcoder for @pipe is still enabled. */
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1480 :
1481 0 : static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1482 : enum pipe pipe, u32 port_sel, u32 val)
1483 : {
1484 0 : if ((val & DP_PORT_EN) == 0)
1485 0 : return false;
1486 :
1487 0 : if (HAS_PCH_CPT(dev_priv->dev)) {
1488 0 : u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1489 0 : u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1490 0 : if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1491 0 : return false;
1492 0 : } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1493 0 : if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1494 0 : return false;
1495 : } else {
1496 0 : if ((val & DP_PIPE_MASK) != (pipe << 30))
1497 0 : return false;
1498 : }
1499 0 : return true;
1500 0 : }
1501 :
1502 0 : static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1503 : enum pipe pipe, u32 val)
1504 : {
1505 0 : if ((val & SDVO_ENABLE) == 0)
1506 0 : return false;
1507 :
1508 0 : if (HAS_PCH_CPT(dev_priv->dev)) {
1509 0 : if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1510 0 : return false;
1511 0 : } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1512 0 : if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1513 0 : return false;
1514 : } else {
1515 0 : if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1516 0 : return false;
1517 : }
1518 0 : return true;
1519 0 : }
1520 :
1521 0 : static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1522 : enum pipe pipe, u32 val)
1523 : {
1524 0 : if ((val & LVDS_PORT_EN) == 0)
1525 0 : return false;
1526 :
1527 0 : if (HAS_PCH_CPT(dev_priv->dev)) {
1528 0 : if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1529 0 : return false;
1530 : } else {
1531 0 : if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1532 0 : return false;
1533 : }
1534 0 : return true;
1535 0 : }
1536 :
1537 0 : static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1538 : enum pipe pipe, u32 val)
1539 : {
1540 0 : if ((val & ADPA_DAC_ENABLE) == 0)
1541 0 : return false;
1542 0 : if (HAS_PCH_CPT(dev_priv->dev)) {
1543 0 : if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1544 0 : return false;
1545 : } else {
1546 0 : if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1547 0 : return false;
1548 : }
1549 0 : return true;
1550 0 : }
1551 :
/*
 * Warn if the PCH DP port at @reg is enabled on @pipe, and additionally
 * catch the IBX erratum of a disabled port left selecting transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1564 :
/*
 * Warn if the PCH HDMI port at @reg is enabled on @pipe, and additionally
 * catch the IBX erratum of a disabled port left selecting transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1577 :
/*
 * Warn if any PCH output port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * routed to @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1601 :
/*
 * Enable the VLV DPLL for @crtc with the dividers in @pipe_config and
 * wait for it to lock.  The pipe must already be disabled.  The final
 * triple rewrite follows the established warmup ritual for this PLL.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1640 :
/*
 * Enable the CHV DPLL for @crtc: first turn the 10-bit clock back on via
 * the DPIO sideband (under sb_lock), then enable the PLL and wait for
 * lock.  The pipe must already be disabled.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1679 :
/* Count the crtcs that are active and driving a DVO output. */
static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}
1691 :
/*
 * Enable the pre-ILK DPLL for @crtc using the dividers already stored in
 * crtc->config.  The pipe must be disabled.  Handles the I830 DVO 2x
 * clock quirk, the VGA-mode-first divider quirk, and the gen-dependent
 * pixel-multiplier programming, then performs the triple warmup rewrite.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1757 :
1758 : /**
1759 : * i9xx_disable_pll - disable a PLL
1760 : * @dev_priv: i915 private structure
1761 : * @pipe: pipe PLL to disable
1762 : *
1763 : * Disable the PLL for @pipe, making sure the pipe is off first.
1764 : *
1765 : * Note! This is for pre-ILK only.
1766 : */
1767 0 : static void i9xx_disable_pll(struct intel_crtc *crtc)
1768 : {
1769 0 : struct drm_device *dev = crtc->base.dev;
1770 0 : struct drm_i915_private *dev_priv = dev->dev_private;
1771 0 : enum pipe pipe = crtc->pipe;
1772 :
1773 : /* Disable DVO 2x clock on both PLLs if necessary */
1774 0 : if (IS_I830(dev) &&
1775 0 : intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1776 0 : !intel_num_dvo_pipes(dev)) {
1777 0 : I915_WRITE(DPLL(PIPE_B),
1778 : I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1779 0 : I915_WRITE(DPLL(PIPE_A),
1780 : I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1781 0 : }
1782 :
1783 : /* Don't disable pipe or pipe PLLs if needed */
1784 0 : if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1785 0 : (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1786 0 : return;
1787 :
1788 : /* Make sure the pipe isn't still relying on us */
1789 0 : assert_pipe_disabled(dev_priv, pipe);
1790 :
1791 0 : I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1792 0 : POSTING_READ(DPLL(pipe));
1793 0 : }
1794 :
/*
 * Disable the VLV DPLL for @pipe.  The pipe must already be off.  Pipe B
 * keeps its CRI clock and refclk running for VGA hotplug detection.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}
1813 :
/*
 * Disable the CHV DPLL for @pipe (pipe must already be off), then shut
 * off the 10-bit display clock via the DPIO sideband under sb_lock.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1839 :
/*
 * Poll (up to 1 s) until the PHY reports @dport ready with exactly
 * @expected_mask.  Which status register and mask to watch depends on
 * the port; port C's ready bits sit 4 positions above port B's in the
 * same register, hence the shift.  WARNs on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1869 :
/*
 * Prepare the shared DPLL used by @crtc.  Only the first prospective user
 * (pll->active == 0) runs the PLL's mode_set hook, while the PLL is still
 * verified to be off.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	/* The crtc should already be accounted for in the PLL's crtc_mask. */
	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1888 :
1889 : /**
1890 : * intel_enable_shared_dpll - enable PCH PLL
1891 : * @dev_priv: i915 private structure
1892 : * @pipe: pipe PLL to enable
1893 : *
1894 : * The PCH PLL needs to be enabled before the PCH transcoder, since it
1895 : * drives the transcoder clock.
1896 : */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Refcounted: only the first user actually turns the PLL on. */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	/* Hold the PLL power domain while the PLL is on; released on disable. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1926 :
/*
 * Drop @crtc's reference on its shared DPLL and turn the PLL off when the
 * last user goes away.  Counterpart to the enable path, including the
 * POWER_DOMAIN_PLLS reference it took.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	/* This crtc must actually be a registered user of the PLL. */
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Refcount underflow; the PLL should already be off in that case. */
	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	/* Matches the intel_display_power_get() done when the PLL came on. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1963 :
/*
 * Enable the PCH transcoder for @pipe.  Requires the shared DPLL and both
 * FDI directions to be up already (asserted below); copies BPC/interlace
 * configuration over from the pipe's PIPECONF.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev));

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	/* Interlaced: IBX SDVO uses the legacy interlace mode, everyone else
	 * the normal one.  Note the braceless nesting: the final "else"
	 * (progressive) binds to the outer "if". */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
2023 :
/*
 * Enable the LPT PCH transcoder, mirroring @cpu_transcoder's interlace
 * mode.  Only transcoder A's registers (LPT_TRANSCONF, TRANS_CHICKEN2
 * of PIPE_A) are touched on LPT.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Copy the CPU transcoder's interlace setting over. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2054 :
/*
 * Disable the PCH transcoder for @pipe and wait (up to 50 ms) for the
 * hardware to report it off.  FDI and the PCH ports must already be down.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2084 :
/*
 * Disable the LPT PCH transcoder and clear the timing-override chicken
 * bit that was set on enable.  Waits up to 50 ms for the off state.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2101 :
2102 : /**
2103 : * intel_enable_pipe - enable a pipe, asserting requirements
2104 : * @crtc: crtc responsible for the pipe
2105 : *
2106 : * Enable @crtc's pipe, making sure that various hardware specific requirements
2107 : * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2108 : */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum pipe pch_transcoder;
	int reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Planes/cursor/sprites must come up after the pipe itself. */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder (A) regardless of pipe. */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	/* Braceless nesting: the "else" with braces binds to the outer "if". */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legitimate for the PIPEA/PIPEB force quirks. */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}
2162 :
2163 : /**
2164 : * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
2166 : *
2167 : * Disable the pipe of @crtc, making sure that various hardware
2168 : * specific requirements are met, if applicable, e.g. plane
2169 : * disabled, panel fitter off, etc.
2170 : *
2171 : * Will wait until the pipe has shut down before returning.
2172 : */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Nothing to do if the pipe is already off. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for shutdown if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2212 :
2213 0 : static bool need_vtd_wa(struct drm_device *dev)
2214 : {
2215 : #ifdef CONFIG_INTEL_IOMMU
2216 : if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2217 : return true;
2218 : #endif
2219 0 : return false;
2220 : }
2221 :
2222 : unsigned int
2223 0 : intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2224 : uint64_t fb_format_modifier, unsigned int plane)
2225 : {
2226 : unsigned int tile_height;
2227 : uint32_t pixel_bytes;
2228 :
2229 0 : switch (fb_format_modifier) {
2230 : case DRM_FORMAT_MOD_NONE:
2231 : tile_height = 1;
2232 0 : break;
2233 : case I915_FORMAT_MOD_X_TILED:
2234 0 : tile_height = IS_GEN2(dev) ? 16 : 8;
2235 0 : break;
2236 : case I915_FORMAT_MOD_Y_TILED:
2237 : tile_height = 32;
2238 0 : break;
2239 : case I915_FORMAT_MOD_Yf_TILED:
2240 0 : pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2241 0 : switch (pixel_bytes) {
2242 : default:
2243 : case 1:
2244 : tile_height = 64;
2245 0 : break;
2246 : case 2:
2247 : case 4:
2248 : tile_height = 32;
2249 0 : break;
2250 : case 8:
2251 : tile_height = 16;
2252 0 : break;
2253 : case 16:
2254 0 : WARN_ONCE(1,
2255 : "128-bit pixels are not supported for display!");
2256 : tile_height = 16;
2257 0 : break;
2258 : }
2259 : break;
2260 : default:
2261 0 : MISSING_CASE(fb_format_modifier);
2262 : tile_height = 1;
2263 0 : break;
2264 : }
2265 :
2266 0 : return tile_height;
2267 : }
2268 :
/*
 * Round @height up to a whole number of tile rows for the given format
 * and modifier (plane 0).
 */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_format_modifier)
{
	unsigned int tile_h;

	tile_h = intel_tile_height(dev, pixel_format, fb_format_modifier, 0);
	return roundup2(height, tile_h);
}
2276 :
/*
 * Fill @view for pinning @fb.  Defaults to the normal GGTT view and only
 * switches to the rotated view (computing its page-layout geometry) when
 * @plane_state requests a 90/270 degree rotation.  Always returns 0.
 */
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->rotation_info;
	unsigned int tile_height, tile_pitch;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return 0;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return 0;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	/* Plane 0: size of the rotated mapping in whole pages. */
	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
					fb->modifier[0], 0);
	tile_pitch = PAGE_SIZE / tile_height;
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* NV12: separate geometry for the half-height UV plane. */
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
						fb->modifier[0], 1);
		tile_pitch = PAGE_SIZE / tile_height;
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
						     tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
				PAGE_SIZE;
	}

	return 0;
}
2320 :
2321 0 : static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2322 : {
2323 0 : if (INTEL_INFO(dev_priv)->gen >= 9)
2324 0 : return 256 * 1024;
2325 0 : else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2326 0 : IS_VALLEYVIEW(dev_priv))
2327 0 : return 128 * 1024;
2328 0 : else if (INTEL_INFO(dev_priv)->gen >= 4)
2329 0 : return 4 * 1024;
2330 : else
2331 0 : return 0;
2332 0 : }
2333 :
/*
 * Pin @fb's backing object into the GGTT for scanout and, for the normal
 * (unrotated) view, install a fence register.  Runs with interruptible
 * waits disabled and a runtime-pm reference held for the pin/fence
 * sequence.  Returns 0 on success or a negative error code; on failure
 * the object is left unpinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state,
			   struct intel_engine_cs *pipelined,
			   struct drm_i915_gem_request **pipelined_request)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Pick the GGTT alignment required by the tiling mode. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		alignment = intel_linear_alignment(dev_priv);
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			      "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	if (ret)
		return ret;

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
						   pipelined_request, &view);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2436 :
/*
 * Counterpart to intel_pin_and_fence_fb_obj(): drop the fence (normal
 * view only) and unpin the fb's object from the display plane.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	int ret;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Recompute the same view the pin path used for this plane state. */
	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	WARN_ONCE(ret, "Couldn't get view from plane state!");

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2454 :
2455 : /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2456 : * is assumed to be a power-of-two. */
2457 0 : unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2458 : int *x, int *y,
2459 : unsigned int tiling_mode,
2460 : unsigned int cpp,
2461 : unsigned int pitch)
2462 : {
2463 0 : if (tiling_mode != I915_TILING_NONE) {
2464 : unsigned int tile_rows, tiles;
2465 :
2466 0 : tile_rows = *y / 8;
2467 0 : *y %= 8;
2468 :
2469 0 : tiles = *x / (512/cpp);
2470 0 : *x %= 512/cpp;
2471 :
2472 0 : return tile_rows * pitch * 8 + tiles * 4096;
2473 : } else {
2474 0 : unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2475 : unsigned int offset;
2476 :
2477 0 : offset = *y * pitch + *x * cpp;
2478 0 : *y = (offset & alignment) / pitch;
2479 0 : *x = ((offset & alignment) - *y * pitch) / cpp;
2480 0 : return offset & ~alignment;
2481 : }
2482 0 : }
2483 :
2484 0 : static int i9xx_format_to_fourcc(int format)
2485 : {
2486 0 : switch (format) {
2487 : case DISPPLANE_8BPP:
2488 0 : return DRM_FORMAT_C8;
2489 : case DISPPLANE_BGRX555:
2490 0 : return DRM_FORMAT_XRGB1555;
2491 : case DISPPLANE_BGRX565:
2492 0 : return DRM_FORMAT_RGB565;
2493 : default:
2494 : case DISPPLANE_BGRX888:
2495 0 : return DRM_FORMAT_XRGB8888;
2496 : case DISPPLANE_RGBX888:
2497 0 : return DRM_FORMAT_XBGR8888;
2498 : case DISPPLANE_BGRX101010:
2499 0 : return DRM_FORMAT_XRGB2101010;
2500 : case DISPPLANE_RGBX101010:
2501 0 : return DRM_FORMAT_XBGR2101010;
2502 : }
2503 0 : }
2504 :
2505 0 : static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2506 : {
2507 0 : switch (format) {
2508 : case PLANE_CTL_FORMAT_RGB_565:
2509 0 : return DRM_FORMAT_RGB565;
2510 : default:
2511 : case PLANE_CTL_FORMAT_XRGB_8888:
2512 0 : if (rgb_order) {
2513 0 : if (alpha)
2514 0 : return DRM_FORMAT_ABGR8888;
2515 : else
2516 0 : return DRM_FORMAT_XBGR8888;
2517 : } else {
2518 0 : if (alpha)
2519 0 : return DRM_FORMAT_ARGB8888;
2520 : else
2521 0 : return DRM_FORMAT_XRGB8888;
2522 : }
2523 : case PLANE_CTL_FORMAT_XRGB_2101010:
2524 0 : if (rgb_order)
2525 0 : return DRM_FORMAT_XBGR2101010;
2526 : else
2527 0 : return DRM_FORMAT_XRGB2101010;
2528 : }
2529 0 : }
2530 :
/*
 * Try to wrap the BIOS-programmed scanout buffer (described by
 * @plane_config) in a GEM object backed by the preallocated stolen-memory
 * range, and initialize the framebuffer around it.  Returns true on
 * success, false if the range is empty, too large, or the fb init fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	/* Inherit the tiling the BIOS programmed. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2589 :
2590 : /* Update plane->state->fb to match plane->fb after driver-internal updates */
2591 : static void
2592 0 : update_state_fb(struct drm_plane *plane)
2593 : {
2594 0 : if (plane->fb == plane->state->fb)
2595 : return;
2596 :
2597 0 : if (plane->state->fb)
2598 0 : drm_framebuffer_unreference(plane->state->fb);
2599 0 : plane->state->fb = plane->fb;
2600 0 : if (plane->state->fb)
2601 0 : drm_framebuffer_reference(plane->state->fb);
2602 0 : }
2603 :
/*
 * Locate (or reconstruct) the framebuffer the BIOS left enabled on
 * @intel_crtc's primary plane.  First tries to wrap the BIOS buffer in a
 * stolen-memory object; failing that, searches other active CRTCs for an
 * fb at the same GGTT offset to share.  If neither works, the primary
 * plane is disabled so the state is consistent.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT offset means the BIOS pointed both at one buffer. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-plane src/dst rectangles for the inherited fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2688 :
/*
 * Program the gen2-4 (and VLV/CHV) primary plane registers for @fb at
 * pan offset (@x, @y), or disable the plane when it is not visible or
 * has no fb.  Pre-gen4 parts take a linear byte offset; gen4+ split the
 * address into a tile-aligned surface base plus x/y tile offsets.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		/* Disable the plane and clear its base address. */
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B uses the PRIM* register variants. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Map the fb's fourcc onto the plane control format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: surface base is tile-aligned, residual goes in x/y. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* 180 degrees: scan out from the opposite corner. */
		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2818 :
/*
 * Program the primary display plane registers on ILK+ (PCH platforms).
 *
 * Writes DSPCNTR/DSPSTRIDE/DSPSURF (and the tiled/linear offset
 * registers) for @crtc so that @fb is scanned out starting at panning
 * position (@x, @y).  If the plane is invisible or there is no fb, the
 * plane is disabled instead.  Assumes the fb object is already pinned
 * into the GGTT.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	/* Plane off (or no fb): clear the control and surface registers. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb fourcc into DSPCNTR pixel-format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* The x/y/linear_offset adjustment is skipped on HSW/BDW --
		 * presumably the hardware handles the 180 degree origin
		 * itself there; confirm against the PRM. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the
			   display data and adding to linear_offset */
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2922 :
2923 0 : u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2924 : uint32_t pixel_format)
2925 : {
2926 0 : u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2927 :
2928 : /*
2929 : * The stride is either expressed as a multiple of 64 bytes
2930 : * chunks for linear buffers or in number of tiles for tiled
2931 : * buffers.
2932 : */
2933 0 : switch (fb_modifier) {
2934 : case DRM_FORMAT_MOD_NONE:
2935 0 : return 64;
2936 : case I915_FORMAT_MOD_X_TILED:
2937 0 : if (INTEL_INFO(dev)->gen == 2)
2938 0 : return 128;
2939 0 : return 512;
2940 : case I915_FORMAT_MOD_Y_TILED:
2941 : /* No need to check for old gens and Y tiling since this is
2942 : * about the display engine and those will be blocked before
2943 : * we get here.
2944 : */
2945 0 : return 128;
2946 : case I915_FORMAT_MOD_Yf_TILED:
2947 0 : if (bits_per_pixel == 8)
2948 0 : return 64;
2949 : else
2950 0 : return 128;
2951 : default:
2952 0 : MISSING_CASE(fb_modifier);
2953 0 : return 64;
2954 : }
2955 0 : }
2956 :
/*
 * Return the 32-bit GGTT offset of @obj for scanout by @intel_plane.
 *
 * Uses the rotated GGTT view when the plane state requests 90/270
 * rotation, otherwise the normal view.  For the second plane of a
 * planar format (@plane == 1) the offset is advanced to the recorded
 * UV start page.
 *
 * Returns (u32)-1 (with a WARN) if no GGTT vma exists for the view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
	struct i915_vma *vma;
	u64 offset;

	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
		view = &i915_ggtt_view_rotated;

	vma = i915_gem_obj_to_ggtt_view(obj, view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view->type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		/* NOTE(review): rotation_info is read even when the normal
		 * view was chosen -- presumably uv_start_page is only
		 * meaningful for rotated planar scanout; confirm callers. */
		offset += vma->ggtt_view.rotation_info.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display engine surface offsets must fit in 32 bits. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2984 :
2985 0 : static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2986 : {
2987 0 : struct drm_device *dev = intel_crtc->base.dev;
2988 0 : struct drm_i915_private *dev_priv = dev->dev_private;
2989 :
2990 0 : I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2991 0 : I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2992 0 : I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2993 0 : }
2994 :
2995 : /*
2996 : * This function detaches (aka. unbinds) unused scalers in hardware
2997 : */
2998 0 : static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2999 : {
3000 : struct intel_crtc_scaler_state *scaler_state;
3001 : int i;
3002 :
3003 0 : scaler_state = &intel_crtc->config->scaler_state;
3004 :
3005 : /* loop through and disable scalers that aren't in use */
3006 0 : for (i = 0; i < intel_crtc->num_scalers; i++) {
3007 0 : if (!scaler_state->scalers[i].in_use)
3008 0 : skl_detach_scaler(intel_crtc, i);
3009 : }
3010 0 : }
3011 :
3012 0 : u32 skl_plane_ctl_format(uint32_t pixel_format)
3013 : {
3014 0 : switch (pixel_format) {
3015 : case DRM_FORMAT_C8:
3016 0 : return PLANE_CTL_FORMAT_INDEXED;
3017 : case DRM_FORMAT_RGB565:
3018 0 : return PLANE_CTL_FORMAT_RGB_565;
3019 : case DRM_FORMAT_XBGR8888:
3020 0 : return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3021 : case DRM_FORMAT_XRGB8888:
3022 0 : return PLANE_CTL_FORMAT_XRGB_8888;
3023 : /*
3024 : * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3025 : * to be already pre-multiplied. We need to add a knob (or a different
3026 : * DRM_FORMAT) for user-space to configure that.
3027 : */
3028 : case DRM_FORMAT_ABGR8888:
3029 0 : return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3030 : PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3031 : case DRM_FORMAT_ARGB8888:
3032 0 : return PLANE_CTL_FORMAT_XRGB_8888 |
3033 : PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3034 : case DRM_FORMAT_XRGB2101010:
3035 0 : return PLANE_CTL_FORMAT_XRGB_2101010;
3036 : case DRM_FORMAT_XBGR2101010:
3037 0 : return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3038 : case DRM_FORMAT_YUYV:
3039 0 : return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3040 : case DRM_FORMAT_YVYU:
3041 0 : return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3042 : case DRM_FORMAT_UYVY:
3043 0 : return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3044 : case DRM_FORMAT_VYUY:
3045 0 : return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3046 : default:
3047 0 : MISSING_CASE(pixel_format);
3048 : }
3049 :
3050 0 : return 0;
3051 0 : }
3052 :
3053 0 : u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3054 : {
3055 0 : switch (fb_modifier) {
3056 : case DRM_FORMAT_MOD_NONE:
3057 : break;
3058 : case I915_FORMAT_MOD_X_TILED:
3059 0 : return PLANE_CTL_TILED_X;
3060 : case I915_FORMAT_MOD_Y_TILED:
3061 0 : return PLANE_CTL_TILED_Y;
3062 : case I915_FORMAT_MOD_Yf_TILED:
3063 0 : return PLANE_CTL_TILED_YF;
3064 : default:
3065 0 : MISSING_CASE(fb_modifier);
3066 0 : }
3067 :
3068 0 : return 0;
3069 0 : }
3070 :
3071 0 : u32 skl_plane_ctl_rotation(unsigned int rotation)
3072 : {
3073 0 : switch (rotation) {
3074 : case BIT(DRM_ROTATE_0):
3075 : break;
3076 : /*
3077 : * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3078 : * while i915 HW rotation is clockwise, thats why this swapping.
3079 : */
3080 : case BIT(DRM_ROTATE_90):
3081 0 : return PLANE_CTL_ROTATE_270;
3082 : case BIT(DRM_ROTATE_180):
3083 0 : return PLANE_CTL_ROTATE_180;
3084 : case BIT(DRM_ROTATE_270):
3085 0 : return PLANE_CTL_ROTATE_90;
3086 : default:
3087 0 : MISSING_CASE(rotation);
3088 0 : }
3089 :
3090 0 : return 0;
3091 0 : }
3092 :
/*
 * Program the SKL+ universal plane 0 (primary plane) for @crtc.
 *
 * Builds PLANE_CTL from the fb format/tiling/rotation, computes the
 * stride and the (possibly rotated) plane offset, then writes the
 * plane registers -- and, when plane_state->scaler_id >= 0, programs
 * the associated pipe scaler.  Disables the plane when it is invisible
 * or has no fb.
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	/* Plane off (or no fb): clear control and surface registers. */
	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	scaler_id = plane_state->scaler_id;
	/* src rect is stored in 16.16 fixed point, hence the shifts. */
	src_x = plane_state->src.x1 >> 16;
	src_y = plane_state->src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->src) >> 16;
	src_h = drm_rect_height(&plane_state->src) >> 16;
	dst_x = plane_state->dst.x1;
	dst_y = plane_state->dst.y1;
	dst_w = drm_rect_width(&plane_state->dst);
	dst_h = drm_rect_height(&plane_state->dst);

	WARN_ON(x != src_x || y != src_y);

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* Scaler positions the output window; plane pos stays 0. */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Surface base is written last; it latches the other registers. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3197 :
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Kill FBC before touching the plane base; @state is unused here. */
	if (dev_priv->fbc.disable_fbc)
		dev_priv->fbc.disable_fbc(dev_priv);

	/* Dispatch to the platform-specific primary plane update hook. */
	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}
3213 :
/*
 * Force-complete the pending page flip (if any) on every CRTC.  Called
 * after a GPU reset so userspace gets its flip events instead of
 * waiting forever on flips that were lost with the reset.
 */
static void intel_complete_page_flips(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}
}
3226 :
/*
 * Re-commit the current primary plane state on every CRTC, rewriting
 * the scanout registers from the last committed fb.  Used after a GPU
 * reset that did not touch the display engine but nuked in-flight
 * CS flips.
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		/* Hold the per-crtc lock so the plane state cannot change
		 * underneath us while we re-commit it. */
		drm_modeset_lock_crtc(crtc, &plane->base);

		plane_state = to_intel_plane_state(plane->base.state);

		if (plane_state->base.fb)
			plane->commit_plane(&plane->base, plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}
3245 :
/*
 * Prepare the display side for a GPU reset.  On pre-gen5 (non-G4X)
 * platforms the reset also clobbers the display, so suspend the
 * display gracefully first.
 *
 * Note: deliberately leaves the modeset locks held on the suspend
 * path; intel_finish_reset() drops them again.
 */
void intel_prepare_reset(struct drm_device *dev)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev);
}
3263 :
/*
 * Counterpart to intel_prepare_reset(): bring the display back after a
 * GPU reset.  Always completes stuck page flips; on platforms where the
 * reset clobbered the display (pre-gen5, non-G4X) it also fully
 * re-initializes the display hardware and drops the modeset locks
 * taken in intel_prepare_reset().
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	drm_modeset_unlock_all(dev);
}
3314 :
/*
 * Wait (uninterruptibly) for all rendering to @old_fb's backing object
 * to retire before the object is unpinned.
 */
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer. Note that we rely on userspace rendering
	 * into the buffer attached to the pipe they are waiting
	 * on. If not, userspace generates a GPU hang with IPEHR
	 * point to the MI_WAIT_FOR_EVENT.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_wait_rendering(obj, true);
	dev_priv->mm.interruptible = was_interruptible;

	WARN_ON(ret);
}
3340 :
/*
 * Return true if @crtc still has an outstanding page flip queued
 * (unpin_work pending).  Reports false while a GPU reset is in
 * progress, or if one completed since the flip was queued, because the
 * reset nukes in-flight flips.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	/* unpin_work is protected by the event lock. */
	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}
3358 :
/*
 * Fastboot/fastset path: update pipe source size, CSC and panel fitter
 * state for @crtc without a full modeset, based on the delta between
 * @old_crtc_state and the current state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3403 :
/*
 * Switch the FDI TX/RX link from a training pattern to normal pixel
 * traffic after link training has completed, and enable enhanced
 * framing.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3444 :
/* The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-stage training sequence: pattern 1 until the RX reports
 * bit lock, then pattern 2 until it reports symbol lock.  Failures are
 * logged with DRM_ERROR but not propagated.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock in the RX interrupt register. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to ack/clear it. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3537 :
/*
 * FDI TX voltage-swing / pre-emphasis settings tried in order by the
 * SNB and IVB link training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3544 :
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-stage scheme as Ironlake (pattern 1 for bit lock, then
 * pattern 2 for symbol lock), but each stage additionally steps
 * through the snb_b_fdi_train_param[] vswing/pre-emphasis table,
 * retrying the lock poll at each level.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the vswing table until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the vswing table until symbol lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3676 :
/* Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike SNB, each vswing/pre-emphasis level (tried twice) restarts
 * the whole sequence from a disabled link: disable TX/RX, re-enable
 * with pattern 1, poll for bit lock, then pattern 2 and poll for
 * symbol lock.  Success exits via the train_done label.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock (double-read to catch a late bit). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3795 :
/*
 * Enable the FDI PLLs for this CRTC's pipe: bring up the PCH FDI RX PLL
 * (programming lane width and mirroring the pipe's BPC), switch the RX
 * clock source from Rawclk to PCDclk, and finally make sure the CPU FDI
 * TX PLL is running.  Each step is flushed with a posting read and given
 * a fixed settle delay before the next one.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Copy the pipe's BPC field from PIPECONF into the FDI RX BPC bits. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3832 :
/*
 * Tear down the FDI PLLs for this CRTC's pipe, reversing
 * ironlake_fdi_pll_enable(): switch RX back to Rawclk, disable the CPU
 * FDI TX PLL, then the PCH FDI RX PLL, flushing and delaying so the
 * clocks are actually off before we return.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3861 :
/*
 * Disable the FDI link for this CRTC: turn off CPU FDI TX and PCH FDI
 * RX, apply the IBX clock-pointer workaround, and leave both ends parked
 * in training pattern 1 with the RX BPC kept consistent with PIPECONF,
 * ready for the next link-training sequence.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* Keep the RX BPC field mirroring the pipe's PIPECONF BPC. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCHs encode the training pattern differently from IBX. */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3913 :
3914 0 : bool intel_has_pending_fb_unpin(struct drm_device *dev)
3915 : {
3916 : struct intel_crtc *crtc;
3917 :
3918 : /* Note that we don't need to be called with mode_config.lock here
3919 : * as our list of CRTC objects is static for the lifetime of the
3920 : * device and so cannot disappear as we iterate. Similarly, we can
3921 : * happily treat the predicates as racy, atomic checks as userspace
3922 : * cannot claim and pin a new fb without at least acquring the
3923 : * struct_mutex and so serialising with us.
3924 : */
3925 0 : for_each_intel_crtc(dev, crtc) {
3926 0 : if (atomic_read(&crtc->unpin_work_count) == 0)
3927 : continue;
3928 :
3929 0 : if (crtc->unpin_work)
3930 0 : intel_wait_for_vblank(dev, crtc->pipe);
3931 :
3932 0 : return true;
3933 : }
3934 :
3935 0 : return false;
3936 0 : }
3937 :
/*
 * Finish a page flip: detach the unpin work from the CRTC, deliver the
 * userspace vblank event (if one was requested), drop the vblank
 * reference taken for the flip, wake anyone in
 * intel_crtc_wait_for_pending_flips(), and queue the deferred unpin
 * work.  Caller context: the ordering barrier below pairs with the
 * writer that publishes ->unpin_work.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);

	/* Actual unpinning happens asynchronously on the driver workqueue. */
	queue_work(dev_priv->wq, &work->work);
}
3960 :
/*
 * Block until all pending page flips on @crtc have completed, then wait
 * for rendering to the current front buffer to finish.  If a flip is
 * still stuck after 60 seconds, warn and force-complete it so the
 * modeset path cannot hang forever.
 */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/* Re-check under the event lock: the flip may have just
		 * completed; only force-complete if it is still pending. */
		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}
3986 :
/*
 * Program iCLKIP clock to the desired frequency (LPT/WPT PCH).
 *
 * The iCLKIP block generates the PCH transcoder clock from a 172.8 MHz
 * virtual root clock via an integer divisor (divsel), a phase
 * increment (phaseinc, 1/64 steps) and an auxiliary /2 divider
 * (auxdiv).  All of its registers live behind the sideband interface
 * (SBI), hence the sb_lock and intel_sbi_read/write accessors.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->sb_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		/* Hardware encodes the integer divisor with a -2 bias. */
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->sb_lock);
}
4075 :
/*
 * Copy the CPU transcoder's timing registers (H/V total, blank, sync,
 * and vsync shift) into the matching PCH transcoder registers so both
 * sides of the FDI link run with identical timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4099 :
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * Bifurcation splits FDI B's four lanes so pipe C can use two of them.
 * No-op if the bit already has the requested value; otherwise both FDI B
 * and FDI C receivers must be disabled before flipping it (WARNed on).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4120 :
4121 0 : static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4122 : {
4123 0 : struct drm_device *dev = intel_crtc->base.dev;
4124 :
4125 0 : switch (intel_crtc->pipe) {
4126 : case PIPE_A:
4127 : break;
4128 : case PIPE_B:
4129 0 : if (intel_crtc->config->fdi_lanes > 2)
4130 0 : cpt_set_fdi_bc_bifurcation(dev, false);
4131 : else
4132 0 : cpt_set_fdi_bc_bifurcation(dev, true);
4133 :
4134 : break;
4135 : case PIPE_C:
4136 0 : cpt_set_fdi_bc_bifurcation(dev, true);
4137 :
4138 0 : break;
4139 : default:
4140 0 : BUG();
4141 : }
4142 0 : }
4143 :
4144 : /*
4145 : * Enable PCH resources required for PCH ports:
4146 : * - PCH PLLs
4147 : * - FDI training & RX/TX
4148 : * - update transcoder timings
4149 : * - DP transcoding bits
4150 : * - transcoder
4151 : */
/*
 * Enable all PCH-side resources for a PCH-driven output on this CRTC:
 * FDI bifurcation (IVB), TU size, FDI link training, PCH DPLL clock
 * selection and enable, PCH transcoder timings, normal-train FDI, the
 * DP transcoder control register (CPT + DP only), and finally the PCH
 * transcoder itself.  The order of these steps follows the hardware
 * mode-set sequence and must not be rearranged.
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		/* PIPECONF BPC (bits 7:5) moves to TRANS_DP_CTL bits 11:9. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port actually driven. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4238 :
/*
 * LPT variant of the PCH enable sequence.  LPT has a single PCH
 * transcoder (A), clocked by iCLKIP instead of a shared DPLL, so this
 * only programs iCLKIP, copies the CPU transcoder timings, and enables
 * the transcoder.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4255 :
4256 0 : struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4257 : struct intel_crtc_state *crtc_state)
4258 : {
4259 0 : struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4260 : struct intel_shared_dpll *pll;
4261 : struct intel_shared_dpll_config *shared_dpll;
4262 : enum intel_dpll_id i;
4263 0 : int max = dev_priv->num_shared_dpll;
4264 :
4265 0 : shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4266 :
4267 0 : if (HAS_PCH_IBX(dev_priv->dev)) {
4268 : /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4269 0 : i = (enum intel_dpll_id) crtc->pipe;
4270 0 : pll = &dev_priv->shared_dplls[i];
4271 :
4272 : DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4273 : crtc->base.base.id, pll->name);
4274 :
4275 0 : WARN_ON(shared_dpll[i].crtc_mask);
4276 :
4277 0 : goto found;
4278 : }
4279 :
4280 0 : if (IS_BROXTON(dev_priv->dev)) {
4281 : /* PLL is attached to port in bxt */
4282 : struct intel_encoder *encoder;
4283 : struct intel_digital_port *intel_dig_port;
4284 :
4285 0 : encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4286 0 : if (WARN_ON(!encoder))
4287 0 : return NULL;
4288 :
4289 0 : intel_dig_port = enc_to_dig_port(&encoder->base);
4290 : /* 1:1 mapping between ports and PLLs */
4291 0 : i = (enum intel_dpll_id)intel_dig_port->port;
4292 0 : pll = &dev_priv->shared_dplls[i];
4293 : DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4294 : crtc->base.base.id, pll->name);
4295 0 : WARN_ON(shared_dpll[i].crtc_mask);
4296 :
4297 0 : goto found;
4298 0 : } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4299 : /* Do not consider SPLL */
4300 0 : max = 2;
4301 :
4302 0 : for (i = 0; i < max; i++) {
4303 0 : pll = &dev_priv->shared_dplls[i];
4304 :
4305 : /* Only want to check enabled timings first */
4306 0 : if (shared_dpll[i].crtc_mask == 0)
4307 : continue;
4308 :
4309 0 : if (memcmp(&crtc_state->dpll_hw_state,
4310 : &shared_dpll[i].hw_state,
4311 0 : sizeof(crtc_state->dpll_hw_state)) == 0) {
4312 : DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4313 : crtc->base.base.id, pll->name,
4314 : shared_dpll[i].crtc_mask,
4315 : pll->active);
4316 : goto found;
4317 : }
4318 : }
4319 :
4320 : /* Ok no matching timings, maybe there's a free one? */
4321 0 : for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4322 0 : pll = &dev_priv->shared_dplls[i];
4323 0 : if (shared_dpll[i].crtc_mask == 0) {
4324 : DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4325 : crtc->base.base.id, pll->name);
4326 : goto found;
4327 : }
4328 : }
4329 :
4330 0 : return NULL;
4331 :
4332 : found:
4333 0 : if (shared_dpll[i].crtc_mask == 0)
4334 0 : shared_dpll[i].hw_state =
4335 0 : crtc_state->dpll_hw_state;
4336 :
4337 0 : crtc_state->shared_dpll = i;
4338 : DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4339 : pipe_name(crtc->pipe));
4340 :
4341 0 : shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4342 :
4343 0 : return pll;
4344 0 : }
4345 :
4346 0 : static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4347 : {
4348 0 : struct drm_i915_private *dev_priv = to_i915(state->dev);
4349 : struct intel_shared_dpll_config *shared_dpll;
4350 : struct intel_shared_dpll *pll;
4351 : enum intel_dpll_id i;
4352 :
4353 0 : if (!to_intel_atomic_state(state)->dpll_set)
4354 0 : return;
4355 :
4356 0 : shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4357 0 : for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4358 0 : pll = &dev_priv->shared_dplls[i];
4359 0 : pll->config = shared_dpll[i];
4360 : }
4361 0 : }
4362 :
/*
 * Sanity-check that the pipe is actually running after a modeset by
 * watching the pipe's scanline counter (PIPEDSL) advance.  If it hasn't
 * moved after two 5 ms waits, the pipe is stuck and we log an error.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Give it one more chance before declaring the pipe dead. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4376 :
/*
 * Stage an update to the SKL scaler state for one scaler user (the crtc
 * panel fitter or a plane).
 *
 * @crtc_state: crtc state holding the staged scaler_state
 * @force_detach: free the scaler regardless of scaling need
 * @scaler_user: bit index identifying the user in scaler_users
 * @scaler_id: in/out; currently assigned scaler (-1 = none)
 * @rotation: plane rotation; 90/270 swaps the src axes for the check
 * @src_w/@src_h/@dst_w/@dst_h: source and destination dimensions
 *
 * Only bookkeeping is updated here (scaler_users bitmask, scalers[].in_use);
 * the actual register programming happens later in plane/panel-fitter
 * code.  Returns 0 on success, -EINVAL when the requested size is
 * outside the scaler's supported range.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
#ifdef DRMDEBUG
	/* Only referenced by the DRM_DEBUG_KMS messages below. */
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
#endif
	int need_scaling;

	/* With 90/270 rotation the source axes are swapped before scaling. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 * - free scaler bound to this plane/crtc
	 * - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4439 :
4440 : /**
4441 : * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4442 : *
4443 : * @state: crtc's scaler state
4444 : *
4445 : * Return
4446 : * 0 - scaler_usage updated successfully
4447 : * error - requested scaling cannot be supported or other error condition
4448 : */
4449 0 : int skl_update_scaler_crtc(struct intel_crtc_state *state)
4450 : {
4451 : #ifdef DRMDEBUG
4452 : struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4453 : #endif
4454 0 : const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4455 :
4456 : DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4457 : intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4458 :
4459 0 : return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4460 0 : &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4461 0 : state->pipe_src_w, state->pipe_src_h,
4462 0 : adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4463 : }
4464 :
4465 : /**
4466 : * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4467 : *
4468 : * @state: crtc's scaler state
4469 : * @plane_state: atomic plane state to update
4470 : *
4471 : * Return
4472 : * 0 - scaler_usage updated successfully
4473 : * error - requested scaling cannot be supported or other error condition
4474 : */
4475 0 : static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4476 : struct intel_plane_state *plane_state)
4477 : {
4478 : #ifdef DRMDEBUG
4479 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4480 : #endif
4481 : struct intel_plane *intel_plane =
4482 0 : to_intel_plane(plane_state->base.plane);
4483 0 : struct drm_framebuffer *fb = plane_state->base.fb;
4484 : int ret;
4485 :
4486 0 : bool force_detach = !fb || !plane_state->visible;
4487 :
4488 : DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4489 : intel_plane->base.base.id, intel_crtc->pipe,
4490 : drm_plane_index(&intel_plane->base));
4491 :
4492 0 : ret = skl_update_scaler(crtc_state, force_detach,
4493 0 : drm_plane_index(&intel_plane->base),
4494 0 : &plane_state->scaler_id,
4495 0 : plane_state->base.rotation,
4496 0 : drm_rect_width(&plane_state->src) >> 16,
4497 0 : drm_rect_height(&plane_state->src) >> 16,
4498 0 : drm_rect_width(&plane_state->dst),
4499 0 : drm_rect_height(&plane_state->dst));
4500 :
4501 0 : if (ret || plane_state->scaler_id < 0)
4502 0 : return ret;
4503 :
4504 : /* check colorkey */
4505 0 : if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4506 : DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4507 : intel_plane->base.base.id);
4508 0 : return -EINVAL;
4509 : }
4510 :
4511 : /* Check src format */
4512 0 : switch (fb->pixel_format) {
4513 : case DRM_FORMAT_RGB565:
4514 : case DRM_FORMAT_XBGR8888:
4515 : case DRM_FORMAT_XRGB8888:
4516 : case DRM_FORMAT_ABGR8888:
4517 : case DRM_FORMAT_ARGB8888:
4518 : case DRM_FORMAT_XRGB2101010:
4519 : case DRM_FORMAT_XBGR2101010:
4520 : case DRM_FORMAT_YUYV:
4521 : case DRM_FORMAT_YVYU:
4522 : case DRM_FORMAT_UYVY:
4523 : case DRM_FORMAT_VYUY:
4524 : break;
4525 : default:
4526 : DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4527 : intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4528 0 : return -EINVAL;
4529 : }
4530 :
4531 0 : return 0;
4532 0 : }
4533 :
4534 0 : static void skylake_scaler_disable(struct intel_crtc *crtc)
4535 : {
4536 : int i;
4537 :
4538 0 : for (i = 0; i < crtc->num_scalers; i++)
4539 0 : skl_detach_scaler(crtc, i);
4540 0 : }
4541 :
/*
 * Program the SKL panel fitter: if pch_pfit is enabled in the crtc
 * state, write the previously allocated scaler's control, window
 * position and window size registers.  Bails out with an error if no
 * scaler was allocated (scaler_id < 0), since pfit requires one.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4569 :
/*
 * Program the ILK-style panel fitter (PF_CTL / PF_WIN_*) when pch_pfit
 * is enabled in the crtc state.  IVB/HSW additionally encode the pipe in
 * PF_CTL since their fitters are per-pipe muxed.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4590 :
/*
 * Enable Intermediate Pixel Storage (IPS) on HSW/BDW.  No-op unless the
 * crtc state requests IPS.  On BDW the enable goes through the pcode
 * mailbox (under rps.hw_lock); on HSW it is a direct IPS_CTL write that
 * we then wait on, since the enable bit only latches at the next vblank.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4623 :
/*
 * Disable IPS on HSW/BDW, the counterpart to hsw_enable_ips().  BDW goes
 * through the pcode mailbox and polls until the hardware reports IPS
 * off; HSW clears IPS_CTL directly.  Ends with a vblank wait because the
 * plane must not be disabled while IPS is still winding down.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4648 :
/**
 * Loads the palette/gamma unit for the CRTC with the prepared values
 * from intel_crtc->lut_r/g/b.  Requires the crtc to be active (the
 * clocks must be running to touch the palette), and on Haswell applies
 * the split-gamma/IPS workaround by toggling IPS off around the writes.
 */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	/* On GMCH platforms verify the relevant PLL is actually running. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 entries as packed 8-bit R/G/B. */
	for (i = 0; i < 256; i++) {
		u32 palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4697 :
4698 0 : static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4699 : {
4700 0 : if (intel_crtc->overlay) {
4701 0 : struct drm_device *dev = intel_crtc->base.dev;
4702 0 : struct drm_i915_private *dev_priv = dev->dev_private;
4703 :
4704 0 : mutex_lock(&dev->struct_mutex);
4705 0 : dev_priv->mm.interruptible = false;
4706 0 : (void) intel_overlay_switch_off(intel_crtc->overlay);
4707 0 : dev_priv->mm.interruptible = true;
4708 0 : mutex_unlock(&dev->struct_mutex);
4709 0 : }
4710 :
4711 : /* Let userspace switch the overlay on again. In most cases userspace
4712 : * has to recompute where to put it anyway.
4713 : */
4714 0 : }
4715 :
4716 : /**
4717 : * intel_post_enable_primary - Perform operations after enabling primary plane
4718 : * @crtc: the CRTC whose primary plane was just enabled
4719 : *
4720 : * Performs potentially sleeping operations that must be done after the primary
4721 : * plane is enabled, such as updating FBC and IPS. Note that this may be
4722 : * called due to an explicit primary plane update, or due to an implicit
4723 : * re-enable that is caused when a sprite plane is updated to no longer
4724 : * completely hide the primary plane.
4725 : */
4726 : static void
4727 0 : intel_post_enable_primary(struct drm_crtc *crtc)
4728 : {
4729 0 : struct drm_device *dev = crtc->dev;
4730 0 : struct drm_i915_private *dev_priv = dev->dev_private;
4731 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4732 0 : int pipe = intel_crtc->pipe;
4733 :
4734 : /*
4735 : * BDW signals flip done immediately if the plane
4736 : * is disabled, even if the plane enable is already
4737 : * armed to occur at the next vblank :(
4738 : */
4739 0 : if (IS_BROADWELL(dev))
4740 0 : intel_wait_for_vblank(dev, pipe);
4741 :
4742 : /*
4743 : * FIXME IPS should be fine as long as one plane is
4744 : * enabled, but in practice it seems to have problems
4745 : * when going from primary only to sprite only and vice
4746 : * versa.
4747 : */
4748 0 : hsw_enable_ips(intel_crtc);
4749 :
4750 : /*
4751 : * Gen2 reports pipe underruns whenever all planes are disabled.
4752 : * So don't enable underrun reporting before at least some planes
4753 : * are enabled.
4754 : * FIXME: Need to fix the logic to work when we turn off all planes
4755 : * but leave the pipe running.
4756 : */
4757 0 : if (IS_GEN2(dev))
4758 0 : intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4759 :
4760 : /* Underruns don't raise interrupts, so check manually. */
4761 0 : if (HAS_GMCH_DISPLAY(dev))
4762 0 : i9xx_check_fifo_underruns(dev_priv);
4763 0 : }
4764 :
4765 : /**
4766 : * intel_pre_disable_primary - Perform operations before disabling primary plane
4767 : * @crtc: the CRTC whose primary plane is to be disabled
4768 : *
4769 : * Performs potentially sleeping operations that must be done before the
4770 : * primary plane is disabled, such as updating FBC and IPS. Note that this may
4771 : * be called due to an explicit primary plane update, or due to an implicit
4772 : * disable that is caused when a sprite plane completely hides the primary
4773 : * plane.
4774 : */
4775 : static void
4776 0 : intel_pre_disable_primary(struct drm_crtc *crtc)
4777 : {
4778 0 : struct drm_device *dev = crtc->dev;
4779 0 : struct drm_i915_private *dev_priv = dev->dev_private;
4780 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4781 0 : int pipe = intel_crtc->pipe;
4782 :
4783 : /*
4784 : * Gen2 reports pipe underruns whenever all planes are disabled.
4785 : * So diasble underrun reporting before all the planes get disabled.
4786 : * FIXME: Need to fix the logic to work when we turn off all planes
4787 : * but leave the pipe running.
4788 : */
4789 0 : if (IS_GEN2(dev))
4790 0 : intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4791 :
4792 : /*
4793 : * Vblank time updates from the shadow to live plane control register
4794 : * are blocked if the memory self-refresh mode is active at that
4795 : * moment. So to make sure the plane gets truly disabled, disable
4796 : * first the self-refresh mode. The self-refresh enable bit in turn
4797 : * will be checked/applied by the HW only at the next frame start
4798 : * event which is after the vblank start event, so we need to have a
4799 : * wait-for-vblank between disabling the plane and the pipe.
4800 : */
4801 0 : if (HAS_GMCH_DISPLAY(dev)) {
4802 0 : intel_set_memory_cxsr(dev_priv, false);
4803 0 : dev_priv->wm.vlv.cxsr = false;
4804 0 : intel_wait_for_vblank(dev, pipe);
4805 0 : }
4806 :
4807 : /*
4808 : * FIXME IPS should be fine as long as one plane is
4809 : * enabled, but in practice it seems to have problems
4810 : * when going from primary only to sprite only and vice
4811 : * versa.
4812 : */
4813 0 : hsw_disable_ips(intel_crtc);
4814 0 : }
4815 :
/*
 * Run the per-CRTC work queued in crtc->atomic that must happen after a
 * plane update commits (vblank wait, frontbuffer flip, cxsr/watermark/FBC
 * updates, primary-plane post-enable), then clear the one-shot flags.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane *plane;

	if (atomic->wait_vblank)
		intel_wait_for_vblank(dev, crtc->pipe);

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	if (atomic->disable_cxsr)
		crtc->wm.cxsr_allowed = true;

	if (crtc->atomic.update_wm_post)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_update(dev_priv);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
		intel_update_sprite_watermarks(plane, &crtc->base,
					       0, 0, 0, false, false);

	/* All one-shot flags are consumed; reset for the next commit. */
	memset(atomic, 0, sizeof(*atomic));
}
4846 :
/*
 * Run the per-CRTC work queued in crtc->atomic that must happen before a
 * plane update commits: frontbuffer-tracking updates for disabled planes,
 * waiting out pending flips, and disabling FBC/IPS/cxsr and the primary
 * plane as requested.
 */
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct drm_plane *p;

	/* Track fb's for any planes being disabled */
	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
		struct intel_plane *plane = to_intel_plane(p);

		/* i915_gem_track_fb() requires struct_mutex. */
		mutex_lock(&dev->struct_mutex);
		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
				  plane->frontbuffer_bit);
		mutex_unlock(&dev->struct_mutex);
	}

	if (atomic->wait_for_flips)
		intel_crtc_wait_for_pending_flips(&crtc->base);

	if (atomic->disable_fbc)
		intel_fbc_disable_crtc(crtc);

	if (crtc->atomic.disable_ips)
		hsw_disable_ips(crtc);

	if (atomic->pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	if (atomic->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;
		intel_set_memory_cxsr(dev_priv, false);
	}
}
4881 :
/*
 * Disable every plane in @plane_mask on @crtc (after switching off the
 * legacy overlay) and report a frontbuffer flip for the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4901 :
/*
 * Full pipe enable sequence for ILK-style (PCH) platforms.
 *
 * The ordering is significant: timings/m_n/pipeconf are programmed first,
 * underrun reporting armed, encoder pre_enable hooks run, the FDI PLL
 * comes up before the CPU pipe when a PCH encoder is present, the LUT is
 * loaded with clocks running but the pipe still off, and only then are
 * the pipe, PCH and encoders enabled.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/* Extra transcoder configuration verification on CPT PCHs. */
	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);
}
4970 :
4971 : /* IPS only exists on ULT machines and is tied to pipe A. */
4972 0 : static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4973 : {
4974 0 : return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4975 : }
4976 :
/*
 * Full pipe enable sequence for HSW/BDW/SKL+ (DDI) platforms.
 *
 * Like ironlake_crtc_enable() the ordering is significant: timings,
 * pipe multiplier, pipeconf and CSC are programmed first, encoder
 * pre_pll/pre hooks run, FDI is trained when a PCH encoder is present,
 * the DDI pipe clock and the pfit come up, the LUT is loaded with
 * clocks running but the pipe off, and only then are the transcoder,
 * pipe, PCH and encoders enabled.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* PIPE_MULT is programmed as (multiplier - 1); eDP has none. */
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
		dev_priv->display.fdi_link_train(crtc);
	}

	if (!is_dsi)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!is_dsi)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5072 :
/*
 * Disable the PCH panel fitter for @crtc by clearing its control,
 * position and size registers.  With @force the registers are cleared
 * unconditionally; otherwise only when the current config has the pfit
 * enabled (see comment below).
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
5087 :
/*
 * Full pipe disable sequence for ILK-style (PCH) platforms — roughly the
 * reverse of ironlake_crtc_enable(): encoders first, then pipe, pfit and
 * FDI, followed by encoder post_disable hooks and PCH transcoder/PLL
 * teardown.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Suppress spurious PCH underrun reports during teardown. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}
}
5138 :
/*
 * Full pipe disable sequence for HSW/BDW/SKL+ (DDI) platforms — roughly
 * the reverse of haswell_crtc_enable(): encoders first, then pipe, MST
 * payload, transcoder, pfit/scaler, DDI pipe clock, and finally the PCH
 * transcoder/FDI and encoder post_disable hooks.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Suppress spurious PCH underrun reports during teardown. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!is_dsi)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!is_dsi)
		intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);
}
5184 :
/*
 * Program the GMCH panel fitter with the ratios/control computed in the
 * pipe config.  A no-op when the config doesn't use the pfit.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5208 :
5209 0 : static enum intel_display_power_domain port_to_power_domain(enum port port)
5210 : {
5211 0 : switch (port) {
5212 : case PORT_A:
5213 0 : return POWER_DOMAIN_PORT_DDI_A_4_LANES;
5214 : case PORT_B:
5215 0 : return POWER_DOMAIN_PORT_DDI_B_4_LANES;
5216 : case PORT_C:
5217 0 : return POWER_DOMAIN_PORT_DDI_C_4_LANES;
5218 : case PORT_D:
5219 0 : return POWER_DOMAIN_PORT_DDI_D_4_LANES;
5220 : case PORT_E:
5221 0 : return POWER_DOMAIN_PORT_DDI_E_2_LANES;
5222 : default:
5223 0 : MISSING_CASE(port);
5224 0 : return POWER_DOMAIN_PORT_OTHER;
5225 : }
5226 0 : }
5227 :
5228 0 : static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5229 : {
5230 0 : switch (port) {
5231 : case PORT_A:
5232 0 : return POWER_DOMAIN_AUX_A;
5233 : case PORT_B:
5234 0 : return POWER_DOMAIN_AUX_B;
5235 : case PORT_C:
5236 0 : return POWER_DOMAIN_AUX_C;
5237 : case PORT_D:
5238 0 : return POWER_DOMAIN_AUX_D;
5239 : case PORT_E:
5240 : /* FIXME: Check VBT for actual wiring of PORT E */
5241 0 : return POWER_DOMAIN_AUX_D;
5242 : default:
5243 0 : MISSING_CASE(port);
5244 0 : return POWER_DOMAIN_AUX_A;
5245 : }
5246 0 : }
5247 :
/* Iterate @domain over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
5251 :
/*
 * Return the display power domain required by @intel_encoder's port,
 * based on the encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams hang off the primary digital port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5278 :
/*
 * Return the power domain required to use @intel_encoder's AUX channel,
 * based on the encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams hang off the primary digital port. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5308 :
/*
 * Compute the mask of power domains (as BIT()s) this CRTC currently
 * requires: its pipe, its cpu transcoder, the panel fitter when in use
 * (or forced through), and every attached encoder's port domain.
 * Returns 0 for an inactive CRTC.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	if (!crtc->state->active)
		return 0;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config->pch_pfit.enabled ||
	    intel_crtc->config->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}
5334 :
/*
 * Refresh crtc->enabled_power_domains to the CRTC's current requirement,
 * grabbing references for newly needed domains.  Returns the mask of
 * domains no longer needed; the caller releases those afterwards via
 * modeset_put_power_domains().
 */
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);

	/* Acquire only the domains we didn't already hold. */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}
5352 :
5353 0 : static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5354 : unsigned long domains)
5355 : {
5356 : enum intel_display_power_domain domain;
5357 :
5358 0 : for_each_power_domain(domain, domains)
5359 0 : intel_display_power_put(dev_priv, domain);
5360 0 : }
5361 :
/*
 * For an atomic state being committed: acquire power-domain references
 * for every CRTC undergoing a modeset, commit a cdclk change when the
 * platform supports it and the frequency actually changed, then release
 * the domain references that are no longer needed.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		/* cdclk changes require allow_modeset; warn otherwise. */
		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Release the domains that the modeset made unnecessary. */
	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}
5389 :
5390 0 : static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5391 : {
5392 0 : int max_cdclk_freq = dev_priv->max_cdclk_freq;
5393 :
5394 0 : if (INTEL_INFO(dev_priv)->gen >= 9 ||
5395 0 : IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5396 0 : return max_cdclk_freq;
5397 0 : else if (IS_CHERRYVIEW(dev_priv))
5398 0 : return max_cdclk_freq*95/100;
5399 0 : else if (INTEL_INFO(dev_priv)->gen < 4)
5400 0 : return 2*max_cdclk_freq*90/100;
5401 : else
5402 0 : return max_cdclk_freq*90/100;
5403 0 : }
5404 :
/*
 * Determine this platform's maximum CD clock (from fuses/straps where the
 * hardware reports it, hardcoded otherwise) and the derived maximum dot
 * clock, and cache both in dev_priv.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* SKL/KBL report the cdclk limit in the DFSM fuse register. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5452 :
/*
 * Read back the current CD clock frequency into dev_priv->cdclk_freq and,
 * on VLV, reprogram the gmbus clock divider derived from it.  The
 * max-cdclk bookkeeping is initialized on first use.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
		 */
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
	}

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);
}
5478 :
5479 0 : static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5480 : {
5481 0 : struct drm_i915_private *dev_priv = dev->dev_private;
5482 : uint32_t divider;
5483 : uint32_t ratio;
5484 : uint32_t current_freq;
5485 : int ret;
5486 :
5487 : /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5488 0 : switch (frequency) {
5489 : case 144000:
5490 : divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5491 : ratio = BXT_DE_PLL_RATIO(60);
5492 0 : break;
5493 : case 288000:
5494 : divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5495 : ratio = BXT_DE_PLL_RATIO(60);
5496 0 : break;
5497 : case 384000:
5498 : divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5499 : ratio = BXT_DE_PLL_RATIO(60);
5500 0 : break;
5501 : case 576000:
5502 : divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5503 : ratio = BXT_DE_PLL_RATIO(60);
5504 0 : break;
5505 : case 624000:
5506 : divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5507 : ratio = BXT_DE_PLL_RATIO(65);
5508 0 : break;
5509 : case 19200:
5510 : /*
5511 : * Bypass frequency with DE PLL disabled. Init ratio, divider
5512 : * to suppress GCC warning.
5513 : */
5514 : ratio = 0;
5515 : divider = 0;
5516 0 : break;
5517 : default:
5518 0 : DRM_ERROR("unsupported CDCLK freq %d", frequency);
5519 :
5520 0 : return;
5521 : }
5522 :
5523 0 : mutex_lock(&dev_priv->rps.hw_lock);
5524 : /* Inform power controller of upcoming frequency change */
5525 0 : ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5526 : 0x80000000);
5527 0 : mutex_unlock(&dev_priv->rps.hw_lock);
5528 :
5529 0 : if (ret) {
5530 0 : DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5531 : ret, frequency);
5532 0 : return;
5533 : }
5534 :
5535 0 : current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5536 : /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5537 0 : current_freq = current_freq * 500 + 1000;
5538 :
5539 : /*
5540 : * DE PLL has to be disabled when
5541 : * - setting to 19.2MHz (bypass, PLL isn't used)
5542 : * - before setting to 624MHz (PLL needs toggling)
5543 : * - before setting to any frequency from 624MHz (PLL needs toggling)
5544 : */
5545 0 : if (frequency == 19200 || frequency == 624000 ||
5546 0 : current_freq == 624000) {
5547 0 : I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5548 : /* Timeout 200us */
5549 0 : if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5550 : 1))
5551 0 : DRM_ERROR("timout waiting for DE PLL unlock\n");
5552 : }
5553 :
5554 0 : if (frequency != 19200) {
5555 : uint32_t val;
5556 :
5557 0 : val = I915_READ(BXT_DE_PLL_CTL);
5558 0 : val &= ~BXT_DE_PLL_RATIO_MASK;
5559 0 : val |= ratio;
5560 0 : I915_WRITE(BXT_DE_PLL_CTL, val);
5561 :
5562 0 : I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5563 : /* Timeout 200us */
5564 0 : if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5565 0 : DRM_ERROR("timeout waiting for DE PLL lock\n");
5566 :
5567 0 : val = I915_READ(CDCLK_CTL);
5568 0 : val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5569 0 : val |= divider;
5570 : /*
5571 : * Disable SSA Precharge when CD clock frequency < 500 MHz,
5572 : * enable otherwise.
5573 : */
5574 0 : val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5575 0 : if (frequency >= 500000)
5576 0 : val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5577 :
5578 0 : val &= ~CDCLK_FREQ_DECIMAL_MASK;
5579 : /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5580 0 : val |= (frequency - 1000) / 500;
5581 0 : I915_WRITE(CDCLK_CTL, val);
5582 0 : }
5583 :
5584 0 : mutex_lock(&dev_priv->rps.hw_lock);
5585 0 : ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5586 0 : DIV_ROUND_UP(frequency, 25000));
5587 0 : mutex_unlock(&dev_priv->rps.hw_lock);
5588 :
5589 0 : if (ret) {
5590 0 : DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5591 : ret, frequency);
5592 0 : return;
5593 : }
5594 :
5595 0 : intel_update_cdclk(dev);
5596 0 : }
5597 :
/*
 * Bring up the Broxton CDCLK hardware at boot/resume time: disable the
 * PCH reset handshake, grab the PLL power domain, and program a maximum
 * CDCLK plus DBUF power if the BIOS left the display uninitialized.
 * Paired with broxton_uninit_cdclk().
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		DRM_DEBUG_KMS("Display already initialized\n");
		/* NOTE(review): this early return keeps the PLLS power
		 * reference; broxton_uninit_cdclk() drops it later. */
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	/* Request DBUF power and poll its status bit (10us settle time). */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5639 :
/*
 * Tear down the Broxton CDCLK hardware: power off DBUF, drop CDCLK to
 * the 19.2MHz bypass (which turns off the DE PLL) and release the PLL
 * power domain taken by broxton_init_cdclk().
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Drop the DBUF power request and poll until it reports off. */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5657 :
/*
 * Valid Skylake CDCLK frequencies (kHz) and the DPLL0 VCO (MHz) each one
 * requires. The 8640 VCO entries serve the eDP 1.4 alternate link rates;
 * the 8100 entries serve the standard DP rates (see skl_dpll0_enable()).
 */
static const struct skl_cdclk_entry {
	unsigned int freq;	/* CDCLK frequency in kHz */
	unsigned int vco;	/* required DPLL0 VCO in MHz */
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5670 :
/*
 * Encode a CDCLK frequency (kHz) into the CDCLK_CTL decimal field:
 * .1 MHz fixed point with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int khz_above_base = freq - 1000;

	return khz_above_base / 500;
}
5675 :
5676 0 : static unsigned int skl_cdclk_get_vco(unsigned int freq)
5677 : {
5678 : unsigned int i;
5679 :
5680 0 : for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5681 0 : const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5682 :
5683 0 : if (e->freq == freq)
5684 0 : return e->vco;
5685 0 : }
5686 :
5687 0 : return 8100;
5688 0 : }
5689 :
5690 : static void
5691 0 : skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5692 : {
5693 : unsigned int min_freq;
5694 : u32 val;
5695 :
5696 : /* select the minimum CDCLK before enabling DPLL 0 */
5697 0 : val = I915_READ(CDCLK_CTL);
5698 : val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5699 : val |= CDCLK_FREQ_337_308;
5700 :
5701 0 : if (required_vco == 8640)
5702 0 : min_freq = 308570;
5703 : else
5704 : min_freq = 337500;
5705 :
5706 0 : val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5707 :
5708 0 : I915_WRITE(CDCLK_CTL, val);
5709 0 : POSTING_READ(CDCLK_CTL);
5710 :
5711 : /*
5712 : * We always enable DPLL0 with the lowest link rate possible, but still
5713 : * taking into account the VCO required to operate the eDP panel at the
5714 : * desired frequency. The usual DP link rates operate with a VCO of
5715 : * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5716 : * The modeset code is responsible for the selection of the exact link
5717 : * rate later on, with the constraint of choosing a frequency that
5718 : * works with required_vco.
5719 : */
5720 0 : val = I915_READ(DPLL_CTRL1);
5721 :
5722 0 : val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5723 : DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5724 0 : val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5725 0 : if (required_vco == 8640)
5726 0 : val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5727 : SKL_DPLL0);
5728 : else
5729 0 : val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5730 : SKL_DPLL0);
5731 :
5732 0 : I915_WRITE(DPLL_CTRL1, val);
5733 0 : POSTING_READ(DPLL_CTRL1);
5734 :
5735 0 : I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5736 :
5737 0 : if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5738 0 : DRM_ERROR("DPLL0 not locked\n");
5739 0 : }
5740 :
5741 0 : static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5742 : {
5743 : int ret;
5744 0 : u32 val;
5745 :
5746 : /* inform PCU we want to change CDCLK */
5747 0 : val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5748 0 : mutex_lock(&dev_priv->rps.hw_lock);
5749 0 : ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5750 0 : mutex_unlock(&dev_priv->rps.hw_lock);
5751 :
5752 0 : return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5753 0 : }
5754 :
5755 0 : static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5756 : {
5757 : unsigned int i;
5758 :
5759 0 : for (i = 0; i < 15; i++) {
5760 0 : if (skl_cdclk_pcu_ready(dev_priv))
5761 0 : return true;
5762 0 : udelay(10);
5763 : }
5764 :
5765 0 : return false;
5766 0 : }
5767 :
/*
 * Program a new CDCLK frequency on Skylake: handshake with the PCU,
 * write CDCLK_CTL, then acknowledge the change via the pcode mailbox.
 * On PCU handshake failure the frequency is left unchanged.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	/* Note: the default label deliberately shares the 337/308 group,
	 * so unsupported frequencies fall back to the lowest setting;
	 * the 675/617 cases follow after it on purpose. */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* Refresh the cached cdclk value from the hardware. */
	intel_update_cdclk(dev);
}
5814 :
/*
 * Tear down Skylake display clocking: power off DBUF, disable DPLL0
 * (only when the DMC firmware isn't managing it) and release the PLL
 * power domain taken by skl_init_cdclk().
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/*
	 * DMC assumes ownership of LCPLL and will get confused if we touch it.
	 */
	if (dev_priv->csr.dmc_payload) {
		/* disable DPLL0 */
		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
					~LCPLL_PLL_ENABLE);
		/* Timeout 1ms waiting for the lock bit to clear. */
		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
			DRM_ERROR("Couldn't disable DPLL0\n");
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5839 :
/*
 * Bring up Skylake display clocking: enable the PCH reset handshake,
 * take the PLL power domain, enable DPLL0 if the BIOS didn't, set the
 * CDCLK the BIOS chose and power up DBUF.
 * Paired with skl_uninit_cdclk().
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val;
	unsigned int required_vco;

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 with the VCO matching the boot cdclk */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power and poll its status (10us settle time) */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5871 :
5872 : /* Adjust CDclk dividers to allow high res or save power if possible */
/*
 * Program a new CDCLK on Valleyview: request the matching voltage level
 * from the Punit, adjust the CCK display clock divider (400MHz only),
 * then retune the BUNIT self-refresh exit latency for the new bandwidth.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* Cached cdclk must match the hardware before we reprogram it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Map cdclk to the Punit DSPFREQ voltage request (0/1/2). */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the voltage level and wait for the Punit to ack it. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		/* Divider derived from the doubled HPLL reference clock. */
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	/* Refresh the cached cdclk value from the hardware. */
	intel_update_cdclk(dev);
}
5937 :
/*
 * Program a new CDCLK on Cherryview. Unlike VLV, only the Punit DSPFREQ
 * request is needed: the CCK divider value itself is written there.
 * Unsupported frequencies are rejected with MISSING_CASE.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* Cached cdclk must match the hardware before we reprogram it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	/* Write the divider request and wait for the Punit to ack it. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* Refresh the cached cdclk value from the hardware. */
	intel_update_cdclk(dev);
}
5978 :
5979 0 : static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5980 : int max_pixclk)
5981 : {
5982 0 : int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
5983 0 : int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5984 :
5985 : /*
5986 : * Really only a few cases to deal with, as only 4 CDclks are supported:
5987 : * 200MHz
5988 : * 267MHz
5989 : * 320/333MHz (depends on HPLL freq)
5990 : * 400MHz (VLV only)
5991 : * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5992 : * of the lower bin and adjust if needed.
5993 : *
5994 : * We seem to get an unstable or solid color picture at 200MHz.
5995 : * Not sure what's wrong. For now use 200MHz only when all pipes
5996 : * are off.
5997 : */
5998 0 : if (!IS_CHERRYVIEW(dev_priv) &&
5999 0 : max_pixclk > freq_320*limit/100)
6000 0 : return 400000;
6001 0 : else if (max_pixclk > 266667*limit/100)
6002 0 : return freq_320;
6003 0 : else if (max_pixclk > 0)
6004 0 : return 266667;
6005 : else
6006 0 : return 200000;
6007 0 : }
6008 :
/*
 * Pick the lowest supported Broxton CDCLK (kHz) whose 90% guardband
 * still covers @max_pixclk; fall back to the 624MHz maximum.
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	/* Candidate frequencies, lowest first. */
	static const int cdclk_steps[] = {
		144000, 288000, 384000, 576000, 624000,
	};
	unsigned int i;

	/*
	 * FIXME:
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	for (i = 0; i < ARRAY_SIZE(cdclk_steps) - 1; i++) {
		if (max_pixclk <= cdclk_steps[i] * 9 / 10)
			return cdclk_steps[i];
	}

	return 624000;
}
6028 :
6029 : /* Compute the max pixel clock for new configuration. Uses atomic state if
6030 : * that's non-NULL, look at current state otherwise. */
/* Compute the max pixel clock for new configuration. Uses atomic state if
 * that's non-NULL, look at current state otherwise.
 * Returns the maximum crtc_clock (kHz) over all enabled crtcs, 0 when
 * none are enabled, or a negative errno if acquiring a crtc state fails. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		/* May lock the crtc and can therefore fail with e.g.
		 * -EDEADLK, which the caller must propagate. */
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}
6052 :
6053 0 : static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6054 : {
6055 0 : struct drm_device *dev = state->dev;
6056 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6057 0 : int max_pixclk = intel_mode_max_pixclk(dev, state);
6058 :
6059 0 : if (max_pixclk < 0)
6060 0 : return max_pixclk;
6061 :
6062 0 : to_intel_atomic_state(state)->cdclk =
6063 0 : valleyview_calc_cdclk(dev_priv, max_pixclk);
6064 :
6065 0 : return 0;
6066 0 : }
6067 :
6068 0 : static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6069 : {
6070 0 : struct drm_device *dev = state->dev;
6071 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6072 0 : int max_pixclk = intel_mode_max_pixclk(dev, state);
6073 :
6074 0 : if (max_pixclk < 0)
6075 0 : return max_pixclk;
6076 :
6077 0 : to_intel_atomic_state(state)->cdclk =
6078 0 : broxton_calc_cdclk(dev_priv, max_pixclk);
6079 :
6080 0 : return 0;
6081 0 : }
6082 :
/*
 * Program the PFI credits in GCI_CONTROL after a CDCLK change. The
 * credit count depends on whether cdclk runs at or above czclk, with
 * different values for CHV vs VLV. Must be called with the relevant
 * power domains held (see valleyview_modeset_commit_cdclk()).
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6118 :
/*
 * Atomic-commit hook: apply the CDCLK computed during the check phase
 * (stored in the intel atomic state) and reprogram the PFI credits.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6145 :
/*
 * Full VLV/CHV crtc enable sequence. The ordering is significant:
 * timings/pipeconf, underrun reporting, encoder pre_pll hooks, PLL
 * (skipped for DSI, whose clock comes from the DSI PLL), encoder
 * pre_enable hooks, panel fitter, LUT, pipe, vblank, encoder enable.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	if (WARN_ON(intel_crtc->active))
		return;

	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B carries extra blend/canvas state for its sprite
	 * pipeline; program legacy blending and a zero canvas color. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	/* DSI gets its pixel clock elsewhere; skip the display PLL. */
	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev)) {
			chv_prepare_pll(intel_crtc, intel_crtc->config);
			chv_enable_pll(intel_crtc, intel_crtc->config);
		} else {
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
			vlv_enable_pll(intel_crtc, intel_crtc->config);
		}
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6208 :
6209 0 : static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6210 : {
6211 0 : struct drm_device *dev = crtc->base.dev;
6212 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6213 :
6214 0 : I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6215 0 : I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6216 0 : }
6217 :
/*
 * Full gen2-4 crtc enable sequence. Ordering mirrors
 * valleyview_crtc_enable() minus the VLV-specific PLL handling:
 * dividers, timings, pipeconf, underrun reporting (not on gen2, which
 * lacks it), encoder pre_enable, PLL, panel fitter, LUT, watermarks,
 * pipe, vblank, encoder enable.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6262 :
6263 0 : static void i9xx_pfit_disable(struct intel_crtc *crtc)
6264 : {
6265 0 : struct drm_device *dev = crtc->base.dev;
6266 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6267 :
6268 0 : if (!crtc->config->gmch_pfit.control)
6269 0 : return;
6270 :
6271 0 : assert_pipe_disabled(dev_priv, crtc->pipe);
6272 :
6273 : DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6274 : I915_READ(PFIT_CONTROL));
6275 0 : I915_WRITE(PFIT_CONTROL, 0);
6276 0 : }
6277 :
/*
 * Full gen2-4/VLV/CHV crtc disable sequence: wait for planes, disable
 * encoders, vblank, pipe, panel fitter, post_disable hooks, the PLL
 * (skipped for DSI), post_pll_disable hooks, then underrun reporting.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI pipes don't use the display PLL, so leave it alone. */
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* gen2 has no underrun reporting hardware. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6324 :
/*
 * Forcefully disable a crtc outside of an atomic commit (e.g. during
 * HW state sanitization): turn off its planes, run the platform
 * crtc_disable hook, release the shared DPLL and drop all power domain
 * references the crtc was holding. Software state is updated to match.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	/* Planes must go down before the pipe; also flush pending flips. */
	if (to_intel_plane_state(crtc->primary->state)->visible) {
		intel_crtc_wait_for_pending_flips(crtc);
		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain reference this crtc held. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;
}
6353 :
6354 : /*
6355 : * turn all crtc's off, but do not adjust state
6356 : * This has to be paired with a call to intel_modeset_setup_hw_state.
6357 : */
/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 *
 * Builds and commits an atomic state that deactivates every active
 * crtc, then marks those crtcs active again in software state so the
 * paired restore knows which ones to bring back.
 * Returns 0 on success or a negative errno.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	unsigned crtc_mask = 0;
	int ret = 0;

	/* Caller must have installed an acquire context beforehand. */
	if (WARN_ON(!ctx))
		return 0;

	lockdep_assert_held(&ctx->ww_ctx);
	state = drm_atomic_state_alloc(dev);
	if (WARN_ON(!state))
		return -ENOMEM;

	state->acquire_ctx = ctx;
	state->allow_modeset = true;

	/* Deactivate every active crtc in the new state, remembering
	 * which ones we touched in crtc_mask. */
	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto free;

		if (!crtc_state->active)
			continue;

		crtc_state->active = false;
		crtc_mask |= 1 << drm_crtc_index(crtc);
	}

	if (crtc_mask) {
		ret = drm_atomic_commit(state);

		if (!ret) {
			/* drm_atomic_commit() took ownership of @state on
			 * success, so don't free it here. Flag the crtcs
			 * as active in software state so the paired
			 * intel_modeset_setup_hw_state() re-enables them. */
			for_each_crtc(dev, crtc)
				if (crtc_mask & (1 << drm_crtc_index(crtc)))
					crtc->state->active = true;

			return ret;
		}
	}

free:
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	drm_atomic_state_free(state);
	return ret;
}
6411 :
/*
 * Default encoder destroy hook: clean up the DRM core encoder state,
 * then free the enclosing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6419 :
6420 : /* Cross check the actual hw state with our own modeset state tracking (and it's
6421 : * internal consistency). */
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). Purely diagnostic: emits I915_STATE_WARNs, never
 * modifies state. */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Hardware says the connector is on: it must have a crtc,
		 * that crtc must be active, and the atomic encoder/crtc
		 * links must agree with our cached encoder. */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* DP MST connectors share encoders; the checks below
		 * don't apply to them. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says off: no active crtc, and no stale
		 * best_encoder without a crtc. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6458 :
6459 0 : int intel_connector_init(struct intel_connector *connector)
6460 : {
6461 : struct drm_connector_state *connector_state;
6462 :
6463 0 : connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
6464 0 : if (!connector_state)
6465 0 : return -ENOMEM;
6466 :
6467 0 : connector->base.state = connector_state;
6468 0 : return 0;
6469 0 : }
6470 :
6471 0 : struct intel_connector *intel_connector_alloc(void)
6472 : {
6473 : struct intel_connector *connector;
6474 :
6475 0 : connector = kzalloc(sizeof *connector, GFP_KERNEL);
6476 0 : if (!connector)
6477 0 : return NULL;
6478 :
6479 0 : if (intel_connector_init(connector) < 0) {
6480 0 : kfree(connector);
6481 0 : return NULL;
6482 : }
6483 :
6484 0 : return connector;
6485 0 : }
6486 :
6487 : /* Simple connector->get_hw_state implementation for encoders that support only
6488 : * one connector and no cloning and hence the encoder state determines the state
6489 : * of the connector. */
6490 0 : bool intel_connector_get_hw_state(struct intel_connector *connector)
6491 : {
6492 0 : enum pipe pipe = 0;
6493 0 : struct intel_encoder *encoder = connector->encoder;
6494 :
6495 0 : return encoder->get_hw_state(encoder, &pipe);
6496 0 : }
6497 :
6498 0 : static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6499 : {
6500 0 : if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6501 0 : return crtc_state->fdi_lanes;
6502 :
6503 0 : return 0;
6504 0 : }
6505 :
/*
 * Validate the FDI lane count requested for @pipe against the platform
 * limits and against the lanes consumed by the other pipes that share
 * the FDI link (Ivybridge 3-pipe sharing rules). Returns 0 if the
 * configuration is possible, -EINVAL if not, or another negative errno
 * if acquiring a sibling crtc state fails.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute hardware maximum. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW FDI (used only for the LPT PCH) is capped at 2 lanes. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe platforms have no cross-pipe lane sharing to check. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use up to 4 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes, and only if pipe B also
		 * stays within 2. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6576 :
6577 : #define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for
 * @pipe_config. If the lane check fails with -EINVAL, progressively
 * reduce pipe_bpp (down to 18bpp) and retry; when a reduction was
 * needed, RETRY is returned so the caller recomputes the whole config
 * with the lowered bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	/* Only -EINVAL is recoverable by lowering bpp; 6*3 = 18bpp floor. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* bpp changed: signal the caller to redo the full computation. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
6623 :
6624 0 : static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6625 : struct intel_crtc_state *pipe_config)
6626 : {
6627 0 : if (pipe_config->pipe_bpp > 24)
6628 0 : return false;
6629 :
6630 : /* HSW can handle pixel rate up to cdclk? */
6631 0 : if (IS_HASWELL(dev_priv->dev))
6632 0 : return true;
6633 :
6634 : /*
6635 : * We compare against max which means we must take
6636 : * the increased cdclk requirement into account when
6637 : * calculating the new cdclk.
6638 : *
6639 : * Should measure whether using a lower cdclk w/o IPS
6640 : */
6641 0 : return ilk_pipe_pixel_rate(pipe_config) <=
6642 0 : dev_priv->max_cdclk_freq * 95 / 100;
6643 0 : }
6644 :
6645 0 : static void hsw_compute_ips_config(struct intel_crtc *crtc,
6646 : struct intel_crtc_state *pipe_config)
6647 : {
6648 0 : struct drm_device *dev = crtc->base.dev;
6649 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6650 :
6651 0 : pipe_config->ips_enabled = i915.enable_ips &&
6652 0 : hsw_crtc_supports_ips(crtc) &&
6653 0 : pipe_config_supports_ips(dev_priv, pipe_config);
6654 0 : }
6655 :
/*
 * Platform-independent crtc config computation: pixel clock limits and
 * pixel doubling on gen < 4, even-width restrictions, the Cantiga+
 * hsync workaround, IPS, and FDI setup for PCH encoders.
 *
 * Returns 0 on success, -EINVAL if the mode can't be supported, or the
 * value from ironlake_fdi_compute_config() (which may request a RETRY).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit = dev_priv->max_cdclk_freq;

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			/* Doubling also doubles the usable clock limit,
			 * so the recheck below uses the raised limit. */
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6709 :
/*
 * SKL: derive the current CD clock (kHz) from LCPLL1/CDCLK_CTL. The
 * same CDCLK_CTL divider encoding yields different frequencies
 * depending on whether DPLL0 runs the 8640 or 8100 MHz VCO, which is
 * inferred from its programmed link rate.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	/* 540 MHz is encoded the same for both VCOs. */
	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}
6756 :
/*
 * BXT: CD clock (kHz) is the 19.2 MHz reference times the DE PLL ratio,
 * halved, then divided by the CD2X divider from CDCLK_CTL.
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	/* With the DE PLL off the 19.2 MHz reference drives the display. */
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk; /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}
6784 :
6785 0 : static int broadwell_get_display_clock_speed(struct drm_device *dev)
6786 : {
6787 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6788 0 : uint32_t lcpll = I915_READ(LCPLL_CTL);
6789 0 : uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6790 :
6791 0 : if (lcpll & LCPLL_CD_SOURCE_FCLK)
6792 0 : return 800000;
6793 0 : else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6794 0 : return 450000;
6795 0 : else if (freq == LCPLL_CLK_FREQ_450)
6796 0 : return 450000;
6797 0 : else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6798 0 : return 540000;
6799 0 : else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6800 0 : return 337500;
6801 : else
6802 0 : return 675000;
6803 0 : }
6804 :
6805 0 : static int haswell_get_display_clock_speed(struct drm_device *dev)
6806 : {
6807 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6808 0 : uint32_t lcpll = I915_READ(LCPLL_CTL);
6809 0 : uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6810 :
6811 0 : if (lcpll & LCPLL_CD_SOURCE_FCLK)
6812 0 : return 800000;
6813 0 : else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6814 0 : return 450000;
6815 0 : else if (freq == LCPLL_CLK_FREQ_450)
6816 0 : return 450000;
6817 0 : else if (IS_HSW_ULT(dev))
6818 0 : return 337500;
6819 : else
6820 0 : return 540000;
6821 0 : }
6822 :
/* VLV/CHV: the display clock is the CCK "cdclk", derived from the HPLL. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}
6828 :
/* ILK: fixed 450 MHz display clock (value in kHz). */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6833 :
/* i945: fixed 400 MHz display clock (value in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6838 :
/* i915: fixed 333.33 MHz display clock (value in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6843 :
/* Misc i9xx parts: fixed 200 MHz display clock (value in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6848 :
/*
 * Pineview: display clock (kHz) is selected by the GCFGC PCI config
 * register. An unknown encoding logs an error and deliberately falls
 * through to the 133 MHz case below it.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through: treat unknown encodings as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6872 :
6873 0 : static int i915gm_get_display_clock_speed(struct drm_device *dev)
6874 : {
6875 0 : u16 gcfgc = 0;
6876 :
6877 0 : pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6878 :
6879 0 : if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6880 0 : return 133333;
6881 : else {
6882 0 : switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6883 : case GC_DISPLAY_CLOCK_333_MHZ:
6884 0 : return 333333;
6885 : default:
6886 : case GC_DISPLAY_CLOCK_190_200_MHZ:
6887 0 : return 190000;
6888 : }
6889 : }
6890 0 : }
6891 :
/* i865: fixed 266.67 MHz display clock (value in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6896 :
/*
 * i85x: display clock (kHz) from the HPLLCC register of the host bridge
 * at devfn(0, 3). 852GM/852GMV (detected by PCI revision, see FIXME) is
 * always 133 MHz.
 */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

	pci_bus_read_config_word(dev->pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6933 :
/* i830: fixed 133.33 MHz display clock (value in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6938 :
/*
 * Read the HPLL VCO frequency (kHz) for the platforms whose CD clock is
 * derived from it. The 3-bit field in HPLLVCO (or HPLLVCO_MOBILE)
 * indexes a per-chipset table; returns 0 for unknown chipsets or
 * unpopulated table slots.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Per-chipset VCO tables, indexed by the 3-bit HPLLVCO field.
	 * Unlisted indices are zero-initialized and treated as errors. */
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	/* Mobile parts report the VCO through a different register. */
	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
7007 :
/*
 * GM45: CD clock (kHz) is the HPLL VCO divided down; GCFGC bit 12
 * selects between the two dividers valid for the current VCO.
 */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		/* Unknown VCO: fall back to the slowest common value. */
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}
7029 :
/*
 * i965GM: CD clock (kHz) is the HPLL VCO divided by a table divisor;
 * GCFGC bits 12:8 (minus one) index the table, selected per VCO.
 */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 16, 10, 8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	/* All three tables have the same length; bounds-check once. */
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	/* Unknown VCO or out-of-range selector: assume 200 MHz. */
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7066 :
/*
 * G33: CD clock (kHz) is the HPLL VCO divided by a table divisor;
 * GCFGC bits 6:4 index the table, selected per VCO frequency.
 */
static int g33_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
	static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	/* All four tables have the same length; bounds-check once. */
	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	/* Unknown VCO or out-of-range selector: assume 190.476 MHz. */
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
	return 190476;
}
7107 :
7108 : static void
7109 0 : intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7110 : {
7111 0 : while (*num > DATA_LINK_M_N_MASK ||
7112 0 : *den > DATA_LINK_M_N_MASK) {
7113 0 : *num >>= 1;
7114 0 : *den >>= 1;
7115 : }
7116 0 : }
7117 :
7118 0 : static void compute_m_n(unsigned int m, unsigned int n,
7119 : uint32_t *ret_m, uint32_t *ret_n)
7120 : {
7121 0 : *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7122 0 : *ret_m = div_u64((uint64_t) m * *ret_n, n);
7123 0 : intel_reduce_m_n_ratio(ret_m, ret_n);
7124 0 : }
7125 :
7126 : void
7127 0 : intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7128 : int pixel_clock, int link_clock,
7129 : struct intel_link_m_n *m_n)
7130 : {
7131 0 : m_n->tu = 64;
7132 :
7133 0 : compute_m_n(bits_per_pixel * pixel_clock,
7134 0 : link_clock * nlanes * 8,
7135 0 : &m_n->gmch_m, &m_n->gmch_n);
7136 :
7137 0 : compute_m_n(pixel_clock, link_clock,
7138 0 : &m_n->link_m, &m_n->link_n);
7139 0 : }
7140 :
7141 0 : static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7142 : {
7143 0 : if (i915.panel_use_ssc >= 0)
7144 0 : return i915.panel_use_ssc != 0;
7145 0 : return dev_priv->vbt.lvds_use_ssc
7146 0 : && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7147 0 : }
7148 :
7149 0 : static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7150 : int num_connectors)
7151 : {
7152 0 : struct drm_device *dev = crtc_state->base.crtc->dev;
7153 0 : struct drm_i915_private *dev_priv = dev->dev_private;
7154 : int refclk;
7155 :
7156 0 : WARN_ON(!crtc_state->base.state);
7157 :
7158 0 : if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
7159 : refclk = 100000;
7160 0 : } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7161 0 : intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7162 0 : refclk = dev_priv->vbt.lvds_ssc_freq;
7163 : DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7164 0 : } else if (!IS_GEN2(dev)) {
7165 : refclk = 96000;
7166 0 : } else {
7167 : refclk = 48000;
7168 : }
7169 :
7170 0 : return refclk;
7171 : }
7172 :
7173 0 : static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7174 : {
7175 0 : return (1 << dpll->n) << 16 | dpll->m2;
7176 : }
7177 :
7178 0 : static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7179 : {
7180 0 : return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7181 : }
7182 :
7183 0 : static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7184 : struct intel_crtc_state *crtc_state,
7185 : intel_clock_t *reduced_clock)
7186 : {
7187 0 : struct drm_device *dev = crtc->base.dev;
7188 : u32 fp, fp2 = 0;
7189 :
7190 0 : if (IS_PINEVIEW(dev)) {
7191 0 : fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7192 0 : if (reduced_clock)
7193 0 : fp2 = pnv_dpll_compute_fp(reduced_clock);
7194 : } else {
7195 0 : fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7196 0 : if (reduced_clock)
7197 0 : fp2 = i9xx_dpll_compute_fp(reduced_clock);
7198 : }
7199 :
7200 0 : crtc_state->dpll_hw_state.fp0 = fp;
7201 :
7202 0 : crtc->lowfreq_avail = false;
7203 0 : if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7204 0 : reduced_clock) {
7205 0 : crtc_state->dpll_hw_state.fp1 = fp2;
7206 0 : crtc->lowfreq_avail = true;
7207 0 : } else {
7208 0 : crtc_state->dpll_hw_state.fp1 = fp;
7209 : }
7210 0 : }
7211 :
/*
 * Work around the PLLB opamp always calibrating to its max value by
 * force-enabling it and programming a sane value via DPIO. Caller holds
 * sb_lock (this is called from vlv_prepare_pll under that lock).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): this assignment discards the masked read above
	 * (looks like it was meant to be |=); matches upstream as-is,
	 * so left untouched — confirm against the DPIO docs. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7240 :
/* Program the PCH transcoder data/link M/N registers for @crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* TU size shares the data M1 register with the M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7253 :
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * per-transcoder registers are used (and the M2_N2 set additionally
 * when @m2_n2 is given, the hardware has those registers, and DRRS is
 * in use); older hardware uses the per-pipe G4X registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
		    crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7287 :
7288 0 : void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7289 : {
7290 : struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7291 :
7292 0 : if (m_n == M1_N1) {
7293 0 : dp_m_n = &crtc->config->dp_m_n;
7294 0 : dp_m2_n2 = &crtc->config->dp_m2_n2;
7295 0 : } else if (m_n == M2_N2) {
7296 :
7297 : /*
7298 : * M2_N2 registers are not supported. Hence m2_n2 divider value
7299 : * needs to be programmed into M1_N1.
7300 : */
7301 0 : dp_m_n = &crtc->config->dp_m2_n2;
7302 : } else {
7303 0 : DRM_ERROR("Unsupported divider value\n");
7304 0 : return;
7305 : }
7306 :
7307 0 : if (crtc->config->has_pch_encoder)
7308 0 : intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7309 : else
7310 0 : intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7311 0 : }
7312 :
7313 0 : static void vlv_compute_dpll(struct intel_crtc *crtc,
7314 : struct intel_crtc_state *pipe_config)
7315 : {
7316 : u32 dpll, dpll_md;
7317 :
7318 : /*
7319 : * Enable DPIO clock input. We should never disable the reference
7320 : * clock for pipe B, since VGA hotplug / manual detection depends
7321 : * on it.
7322 : */
7323 : dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7324 : DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7325 : /* We should never disable this, set it here for state tracking */
7326 0 : if (crtc->pipe == PIPE_B)
7327 0 : dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7328 0 : dpll |= DPLL_VCO_ENABLE;
7329 0 : pipe_config->dpll_hw_state.dpll = dpll;
7330 :
7331 0 : dpll_md = (pipe_config->pixel_multiplier - 1)
7332 0 : << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7333 0 : pipe_config->dpll_hw_state.dpll_md = dpll_md;
7334 0 : }
7335 :
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO,
 * following the vbios programming notes. Takes/releases sb_lock; the
 * write sequence below is order-sensitive.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers written first, then calibration enabled on top. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock control: preserve only bits 15:8 of the old value. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7426 :
7427 0 : static void chv_compute_dpll(struct intel_crtc *crtc,
7428 : struct intel_crtc_state *pipe_config)
7429 : {
7430 0 : pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7431 : DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7432 : DPLL_VCO_ENABLE;
7433 0 : if (crtc->pipe != PIPE_A)
7434 0 : pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7435 :
7436 0 : pipe_config->dpll_hw_state.dpll_md =
7437 0 : (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7438 0 : }
7439 :
/*
 * Program the CHV DPLL dividers, lock-detect threshold and loop filter
 * over DPIO. The refclk/SSC enable is written to the DPLL register
 * first (with VCO still disabled); the DPIO sequence below is
 * order-sensitive and runs under sb_lock.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	bestn = pipe_config->dpll.n;
	/* m2 is split into a 22-bit fractional and integer part. */
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse detection only when no fractional divider is in use. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/* Loop filter coefficients and tri-buffer cal count are chosen
	 * by the VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7543 :
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
7554 0 : void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7555 : const struct dpll *dpll)
7556 : {
7557 : struct intel_crtc *crtc =
7558 0 : to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7559 0 : struct intel_crtc_state pipe_config = {
7560 0 : .base.crtc = &crtc->base,
7561 : .pixel_multiplier = 1,
7562 0 : .dpll = *dpll,
7563 : };
7564 :
7565 0 : if (IS_CHERRYVIEW(dev)) {
7566 0 : chv_compute_dpll(crtc, &pipe_config);
7567 0 : chv_prepare_pll(crtc, &pipe_config);
7568 0 : chv_enable_pll(crtc, &pipe_config);
7569 0 : } else {
7570 0 : vlv_compute_dpll(crtc, &pipe_config);
7571 0 : vlv_prepare_pll(crtc, &pipe_config);
7572 0 : vlv_enable_pll(crtc, &pipe_config);
7573 : }
7574 0 : }
7575 :
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * force-enabled (see vlv_force_pll_on()) even though @pipe itself was
 * never enabled.
 */
7584 0 : void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7585 : {
7586 0 : if (IS_CHERRYVIEW(dev))
7587 0 : chv_disable_pll(to_i915(dev), pipe);
7588 : else
7589 0 : vlv_disable_pll(to_i915(dev), pipe);
7590 0 : }
7591 :
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) control register values for a
 * gen3+ (i9xx-class) pipe and stash them in crtc_state->dpll_hw_state.
 * Nothing is written to hardware here. @reduced_clock, when non-NULL on
 * G4X, additionally programs the downclocked P1 divider field.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	/* Fill in the FP0/FP1 divider registers first. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	/* SDVO and HDMI share the same high-speed clocking requirement. */
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high-speed clock mode. */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* Optional downclocked P1 divider, G4X only. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* Encode the P2 post divider. */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread-spectrum for a lone
	 * SSC-capable LVDS panel, otherwise the default reference. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ keep the UDI pixel multiplier in DPLL_MD instead. */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7668 :
/*
 * Compute the DPLL control register value for gen2 (i8xx-class) hardware
 * and store it in crtc_state->dpll_hw_state.dpll. Pure computation: no
 * register writes happen here.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Program the FP0/FP1 divider registers. */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS encodes P1 as a one-hot bitmask; everything else uses the
	 * P1/P2 divide-by fields. */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* DVO needs the doubled clock; 830 is excluded (see the 2X filtering
	 * in i9xx_get_pipe_config()). */
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Spread spectrum only for a lone SSC-capable LVDS panel. */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7706 :
/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync,
 * vsyncshift and PIPESRC) from the crtc's adjusted mode. All register
 * fields are stored as value-minus-one, hence the "- 1" throughout.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO and everything else want the sync shift computed
		 * differently for interlaced modes. */
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Wrap a negative shift back into the scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Horizontal timings: low 16 bits active/start, high 16 bits
	 * total/end, each minus one. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	/* Vertical timings, using the interlace-corrected values. */
	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7774 :
/*
 * Read back the pipe/transcoder timing registers into @pipe_config. The
 * inverse of intel_set_pipe_timings(): hardware stores every field as
 * value-minus-one, hence the "+ 1" on each decode.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	/* Horizontal timings: low half = display/start, high half =
	 * total/end. */
	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Vertical timings, same layout. */
	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline adjustment applied on the write side for
	 * interlaced modes. */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC: high half = width, low half = height, each minus one. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7816 :
7817 0 : void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7818 : struct intel_crtc_state *pipe_config)
7819 : {
7820 0 : mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7821 0 : mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7822 0 : mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7823 0 : mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7824 :
7825 0 : mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7826 0 : mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7827 0 : mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7828 0 : mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7829 :
7830 0 : mode->flags = pipe_config->base.adjusted_mode.flags;
7831 0 : mode->type = DRM_MODE_TYPE_DRIVER;
7832 :
7833 0 : mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7834 0 : mode->flags |= pipe_config->base.adjusted_mode.flags;
7835 :
7836 0 : mode->hsync = drm_mode_hsync(mode);
7837 0 : mode->vrefresh = drm_mode_vrefresh(mode);
7838 0 : drm_mode_set_name(mode);
7839 0 : }
7840 :
/*
 * Compute and write the PIPECONF register for a gen2-gen4/VLV pipe:
 * double-wide mode, bpc/dither (g4x+), CxSR downclocking, interlace mode
 * and (VLV) limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Quirked always-on pipes: preserve the current enable bit instead
	 * of clearing it. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	/* CxSR downclocking when a reduced clock is available. */
	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	/* Interlace mode: gen3 and SDVO need the field indication variant. */
	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7903 :
/*
 * Compute the clock/DPLL state for a gen2-gen4/VLV/CHV crtc: find PLL
 * dividers for the target port clock (unless already set by an encoder)
 * and fill in crtc_state->dpll_hw_state via the platform-specific helper.
 *
 * Returns 0 on success, -EINVAL if no divider settings can be found.
 * DSI pipes bypass the DPLL entirely and return 0 early.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	bool is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* Walk the connectors attached to this crtc: count them and note
	 * whether any of them is DSI. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* DSI has its own dedicated PLL, nothing to do here. */
	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE. The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Dispatch to the platform-specific DPLL register computation. */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
7983 :
/*
 * Read back the panel fitter (pfit) state for @crtc into @pipe_config.
 * Bails out early if the hardware has no pfit, the pfit is disabled, or
 * the pfit is attached to a different pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Pre-gen4 desktop parts (and 830) have no panel fitter. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4 the pfit is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	/* LVDS border bits only exist before gen5. */
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
8013 :
/*
 * Read back the VLV DPLL divider values over the sideband (DPIO) bus and
 * compute pipe_config->port_clock from them. Skipped when the DPLL is not
 * enabled (e.g. MIPI, which uses its own PLL).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV reference clock; units match port_clock */

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from the DPIO register. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8040 :
/*
 * Read back the framebuffer the firmware/BIOS left enabled on this crtc's
 * primary plane and describe it in @plane_config (base address, size,
 * pitch, pixel format, tiling). Allocates an intel_framebuffer to hold
 * the metadata; ownership passes to plane_config->fb. Returns silently if
 * the plane is disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling bit only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	/* Decode the pixel format from the plane control register. */
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base/offset registers differ between gens. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* Plane dimensions come from PIPESRC (width in the high half). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8109 :
/*
 * Read back the Cherryview DPLL divider registers over the sideband
 * (DPIO) bus and compute pipe_config->port_clock from them.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* CHV reference clock; units match port_clock */

	/* Sideband access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack dividers; m2 is a 22.? fixed point value whose fractional
	 * part is only valid when fractional division is enabled. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8139 :
/*
 * Read out the full hardware state of a gen2-gen4/VLV/CHV pipe into
 * @pipe_config. Returns false if the pipe's power domain is off or the
 * pipe is disabled, true once the config has been filled in.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Can't read registers when the power well is down. */
	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	/* On these platforms the transcoder is always tied 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* bpc fields only exist on g4x+ / VLV. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier lives in DPLL_MD on gen4+, in DPLL on 945/G33,
	 * and in the SDVO port register on i915G/GM (fixed up later by the
	 * encoder). */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recover the port clock via the platform-specific read-back. */
	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	return true;
}
8237 :
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake: decide whether the SSC source and the CPU eDP output need to
 * be on based on the attached panels and any DPLLs already using SSC,
 * then transition the hardware to that state one source at a time (each
 * step is posted and followed by a 200us delay, as the sequence is
 * order-sensitive).
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is driven off the CPU. */
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 clock chip may replace the internal
	 * non-spread source; SSC is only usable when it is present. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC running for a DPLL that already depends on it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Hardware already matches the desired state: nothing to do. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Only kill SSC when no enabled DPLL references it. */
		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-by-step transition must have landed exactly on the
	 * precomputed target state. */
	BUG_ON(val != final);
}
8405 :
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Assert the FDI mPHY reset via the IOSF sideband reset control bit. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Hardware acks the assert by raising the status bit; give it 100 us. */
	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the reset ... */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* ... and wait for the status bit to clear again (100 us max). */
	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8426 :
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/*
	 * Program the FDI mPHY tuning registers over the SBI MPHY sideband.
	 * The offsets and values are opaque constants prescribed by the
	 * WaMPhyProgramming:hsw workaround; most registers come in pairs
	 * (0x20xx and the matching 0x21xx offset — presumably one per FDI
	 * channel, TODO confirm) that get identical read-modify-write
	 * updates. Do not reorder or coalesce these accesses.
	 */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8501 :
8502 : /* Implements 3 different sequences from BSpec chapter "Display iCLK
8503 : * Programming" based on the parameters passed:
8504 : * - Sequence to enable CLKOUT_DP
8505 : * - Sequence to enable CLKOUT_DP without spread
8506 : * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8507 : */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations: FDI requires the
	 * spread clock, and the LP PCH variant has no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All iCLK/mPHY sideband traffic below is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-gate the SSC block (clear DISABLE) but keep the bypass path
	 * (PATHALT) selected while it spins up. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Drop the bypass so the spread clock is actually used. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		/* FDI consumers additionally need the mPHY reset + tuning. */
		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Turn on the CLKOUT_DP buffer; the control register differs per
	 * PCH variant. NOTE: setting SBI_GEN0_CFG_BUFFENABLE_DISABLE here
	 * is the enable path — the disable sequence clears this bit. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8546 :
8547 : /* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Turn off the CLKOUT_DP buffer first (register differs between
	 * LPT-LP and the full PCH; mirrors the enable path above). */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If SSC is still running: select the bypass path (PATHALT) first,
	 * wait for it to settle, then gate the SSC block entirely. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8573 :
8574 0 : static void lpt_init_pch_refclk(struct drm_device *dev)
8575 : {
8576 : struct intel_encoder *encoder;
8577 : bool has_vga = false;
8578 :
8579 0 : for_each_intel_encoder(dev, encoder) {
8580 0 : switch (encoder->type) {
8581 : case INTEL_OUTPUT_ANALOG:
8582 : has_vga = true;
8583 0 : break;
8584 : default:
8585 : break;
8586 : }
8587 : }
8588 :
8589 0 : if (has_vga)
8590 0 : lpt_enable_clkout_dp(dev, true, true);
8591 : else
8592 0 : lpt_disable_clkout_dp(dev);
8593 0 : }
8594 :
8595 : /*
8596 : * Initialize reference clocks when the driver loads
8597 : */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/*
	 * Dispatch on the PCH generation: IBX/CPT use the ironlake
	 * reference-clock setup, LPT uses the iCLK sideband path.
	 * (A device has exactly one PCH type, so the order of these
	 * checks is immaterial.)
	 */
	if (HAS_PCH_LPT(dev)) {
		lpt_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
}
8605 :
8606 0 : static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8607 : {
8608 0 : struct drm_device *dev = crtc_state->base.crtc->dev;
8609 0 : struct drm_i915_private *dev_priv = dev->dev_private;
8610 0 : struct drm_atomic_state *state = crtc_state->base.state;
8611 : struct drm_connector *connector;
8612 : struct drm_connector_state *connector_state;
8613 : struct intel_encoder *encoder;
8614 : int num_connectors = 0, i;
8615 : bool is_lvds = false;
8616 :
8617 0 : for_each_connector_in_state(state, connector, connector_state, i) {
8618 0 : if (connector_state->crtc != crtc_state->base.crtc)
8619 : continue;
8620 :
8621 0 : encoder = to_intel_encoder(connector_state->best_encoder);
8622 :
8623 0 : switch (encoder->type) {
8624 : case INTEL_OUTPUT_LVDS:
8625 : is_lvds = true;
8626 0 : break;
8627 : default:
8628 : break;
8629 : }
8630 0 : num_connectors++;
8631 0 : }
8632 :
8633 0 : if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8634 : DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8635 : dev_priv->vbt.lvds_ssc_freq);
8636 0 : return dev_priv->vbt.lvds_ssc_freq;
8637 : }
8638 :
8639 0 : return 120000;
8640 0 : }
8641 :
8642 0 : static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8643 : {
8644 0 : struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8645 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8646 0 : int pipe = intel_crtc->pipe;
8647 : uint32_t val;
8648 :
8649 : val = 0;
8650 :
8651 0 : switch (intel_crtc->config->pipe_bpp) {
8652 : case 18:
8653 : val |= PIPECONF_6BPC;
8654 0 : break;
8655 : case 24:
8656 : val |= PIPECONF_8BPC;
8657 0 : break;
8658 : case 30:
8659 : val |= PIPECONF_10BPC;
8660 0 : break;
8661 : case 36:
8662 : val |= PIPECONF_12BPC;
8663 0 : break;
8664 : default:
8665 : /* Case prevented by intel_choose_pipe_bpp_dither. */
8666 0 : BUG();
8667 : }
8668 :
8669 0 : if (intel_crtc->config->dither)
8670 0 : val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8671 :
8672 0 : if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8673 0 : val |= PIPECONF_INTERLACED_ILK;
8674 : else
8675 : val |= PIPECONF_PROGRESSIVE;
8676 :
8677 0 : if (intel_crtc->config->limited_color_range)
8678 0 : val |= PIPECONF_COLOR_RANGE_SELECT;
8679 :
8680 0 : I915_WRITE(PIPECONF(pipe), val);
8681 0 : POSTING_READ(PIPECONF(pipe));
8682 0 : }
8683 :
8684 : /*
8685 : * Set up the pipe CSC unit.
8686 : *
8687 : * Currently only full range RGB to limited range RGB conversion
8688 : * is supported, but eventually this should handle various
8689 : * RGB<->YCbCr scenarios as well.
8690 : */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	/* Identity coefficient (1.0 in the CSC's fixed-point format). */
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* For limited range, scale by (235-16)/255 to compress full-range
	 * RGB into the 16..235 video range. */
	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	/* No pre-offsets: input is treated as plain full-range RGB. */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7+: limited range is realized via a +16/255 post-offset. */
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		/* Older gens: use the mode register's black-screen offset
		 * instead of explicit post-offsets. */
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
8747 :
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	/* Only Haswell carries the dither bits in PIPECONF; BDW+ moved
	 * them to PIPEMISC (programmed below). */
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* PIPECONF is per-transcoder on HSW+, not per-pipe. */
	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* BDW and gen9+ program bpc/dithering via the PIPEMISC register. */
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}
8800 :
8801 0 : static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8802 : struct intel_crtc_state *crtc_state,
8803 : intel_clock_t *clock,
8804 : bool *has_reduced_clock,
8805 : intel_clock_t *reduced_clock)
8806 : {
8807 0 : struct drm_device *dev = crtc->dev;
8808 0 : struct drm_i915_private *dev_priv = dev->dev_private;
8809 : int refclk;
8810 : const intel_limit_t *limit;
8811 : bool ret;
8812 :
8813 0 : refclk = ironlake_get_refclk(crtc_state);
8814 :
8815 : /*
8816 : * Returns a set of divisors for the desired target clock with the given
8817 : * refclk, or FALSE. The returned values represent the clock equation:
8818 : * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8819 : */
8820 0 : limit = intel_limit(crtc_state, refclk);
8821 0 : ret = dev_priv->display.find_dpll(limit, crtc_state,
8822 0 : crtc_state->port_clock,
8823 : refclk, NULL, clock);
8824 0 : if (!ret)
8825 0 : return false;
8826 :
8827 0 : return true;
8828 0 : }
8829 :
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Pad the required bandwidth by 5% so spread spectrum cannot
	 * oversubscribe the link: max center spread is 2.5%, doubled
	 * for safety's sake. Then divide by the per-lane byte rate
	 * (link_bw * 8), rounding up to whole lanes.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bw = link_bw * 8;

	return (bps + lane_bw - 1) / lane_bw;
}
8840 :
8841 0 : static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8842 : {
8843 0 : return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8844 : }
8845 :
/*
 * Assemble the ILK DPLL control register value for the given CRTC state,
 * ORing FP_CB_TUNE into *fp (and *fp2 when non-NULL) when the divisor
 * ratio calls for feedback tuning. Returns the DPLL value with
 * DPLL_VCO_ENABLE set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the outputs driven by this CRTC; several DPLL bits
	 * below depend on the output type mix. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		/* 100 MHz SSC refclk or dual-link LVDS on IBX needs the
		 * larger tuning threshold. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	/* Same check for the reduced (downclocked) divisors, if present. */
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* Both SDVO/HDMI and DP use the high-speed clock bit. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread-spectrum only for a lone SSC LVDS panel. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
8941 :
8942 0 : static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8943 : struct intel_crtc_state *crtc_state)
8944 : {
8945 0 : struct drm_device *dev = crtc->base.dev;
8946 0 : intel_clock_t clock, reduced_clock;
8947 0 : u32 dpll = 0, fp = 0, fp2 = 0;
8948 0 : bool ok, has_reduced_clock = false;
8949 : bool is_lvds = false;
8950 : struct intel_shared_dpll *pll;
8951 :
8952 0 : memset(&crtc_state->dpll_hw_state, 0,
8953 : sizeof(crtc_state->dpll_hw_state));
8954 :
8955 0 : is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
8956 :
8957 0 : WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
8958 : "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
8959 :
8960 0 : ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
8961 : &has_reduced_clock, &reduced_clock);
8962 0 : if (!ok && !crtc_state->clock_set) {
8963 0 : DRM_ERROR("Couldn't find PLL settings for mode!\n");
8964 0 : return -EINVAL;
8965 : }
8966 : /* Compat-code for transition, will disappear. */
8967 0 : if (!crtc_state->clock_set) {
8968 0 : crtc_state->dpll.n = clock.n;
8969 0 : crtc_state->dpll.m1 = clock.m1;
8970 0 : crtc_state->dpll.m2 = clock.m2;
8971 0 : crtc_state->dpll.p1 = clock.p1;
8972 0 : crtc_state->dpll.p2 = clock.p2;
8973 0 : }
8974 :
8975 : /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8976 0 : if (crtc_state->has_pch_encoder) {
8977 0 : fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8978 0 : if (has_reduced_clock)
8979 0 : fp2 = i9xx_dpll_compute_fp(&reduced_clock);
8980 :
8981 0 : dpll = ironlake_compute_dpll(crtc, crtc_state,
8982 : &fp, &reduced_clock,
8983 0 : has_reduced_clock ? &fp2 : NULL);
8984 :
8985 0 : crtc_state->dpll_hw_state.dpll = dpll;
8986 0 : crtc_state->dpll_hw_state.fp0 = fp;
8987 0 : if (has_reduced_clock)
8988 0 : crtc_state->dpll_hw_state.fp1 = fp2;
8989 : else
8990 0 : crtc_state->dpll_hw_state.fp1 = fp;
8991 :
8992 0 : pll = intel_get_shared_dpll(crtc, crtc_state);
8993 0 : if (pll == NULL) {
8994 : DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8995 : pipe_name(crtc->pipe));
8996 0 : return -EINVAL;
8997 : }
8998 : }
8999 :
9000 0 : if (is_lvds && has_reduced_clock)
9001 0 : crtc->lowfreq_avail = true;
9002 : else
9003 0 : crtc->lowfreq_avail = false;
9004 :
9005 0 : return 0;
9006 0 : }
9007 :
/*
 * Read back the link/data M1/N1 values from the PCH transcoder registers
 * for this CRTC's pipe. The TU size is packed into the top bits of the
 * DATA_M1 register, hence the masking and the second read below.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU field is stored as (tu - 1) in hardware. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9023 :
/*
 * Read back link/data M/N values from the CPU transcoder registers.
 * On gen5+ the registers are indexed by transcoder; older gens use the
 * per-pipe G4X registers. When m2_n2 is non-NULL and the hardware has a
 * second M2/N2 set (gen < 8) with DRRS enabled, that set is read as well.
 * The TU size is packed into the top bits of the DATA_M register, hence
 * the extra masked reads.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9065 :
9066 0 : void intel_dp_get_m_n(struct intel_crtc *crtc,
9067 : struct intel_crtc_state *pipe_config)
9068 : {
9069 0 : if (pipe_config->has_pch_encoder)
9070 0 : intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9071 : else
9072 0 : intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9073 0 : &pipe_config->dp_m_n,
9074 0 : &pipe_config->dp_m2_n2);
9075 0 : }
9076 :
9077 0 : static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9078 : struct intel_crtc_state *pipe_config)
9079 : {
9080 0 : intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9081 0 : &pipe_config->fdi_m_n, NULL);
9082 0 : }
9083 :
9084 0 : static void skylake_get_pfit_config(struct intel_crtc *crtc,
9085 : struct intel_crtc_state *pipe_config)
9086 : {
9087 0 : struct drm_device *dev = crtc->base.dev;
9088 0 : struct drm_i915_private *dev_priv = dev->dev_private;
9089 0 : struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9090 : uint32_t ps_ctrl = 0;
9091 : int id = -1;
9092 : int i;
9093 :
9094 : /* find scaler attached to this pipe */
9095 0 : for (i = 0; i < crtc->num_scalers; i++) {
9096 0 : ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9097 0 : if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9098 : id = i;
9099 0 : pipe_config->pch_pfit.enabled = true;
9100 0 : pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9101 0 : pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9102 0 : break;
9103 : }
9104 : }
9105 :
9106 0 : scaler_state->scaler_id = id;
9107 0 : if (id >= 0) {
9108 0 : scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9109 0 : } else {
9110 0 : scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9111 : }
9112 0 : }
9113 :
/*
 * Read back the primary plane's framebuffer configuration (as set up by
 * firmware/BIOS) into plane_config, allocating an intel_framebuffer to
 * describe it. On any readout failure the allocation is released and
 * plane_config->fb is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Nothing to read out when the plane is disabled. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	/* Translate the hardware pixel format to a DRM fourcc. */
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	/* NOTE(review): offset is read but not used afterwards. */
	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Hardware stride is in tiles/elements; scale to bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* fb == &intel_fb->base; freeing it releases the whole intel_fb
	 * (relies on base being the first member — NOTE(review): confirm). */
	kfree(fb);
}
9197 :
9198 0 : static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9199 : struct intel_crtc_state *pipe_config)
9200 : {
9201 0 : struct drm_device *dev = crtc->base.dev;
9202 0 : struct drm_i915_private *dev_priv = dev->dev_private;
9203 : uint32_t tmp;
9204 :
9205 0 : tmp = I915_READ(PF_CTL(crtc->pipe));
9206 :
9207 0 : if (tmp & PF_ENABLE) {
9208 0 : pipe_config->pch_pfit.enabled = true;
9209 0 : pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9210 0 : pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9211 :
9212 : /* We currently do not free assignements of panel fitters on
9213 : * ivb/hsw (since we don't use the higher upscaling modes which
9214 : * differentiates them) so just WARN about this case for now. */
9215 0 : if (IS_GEN7(dev)) {
9216 0 : WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9217 : PF_PIPE_SEL_IVB(crtc->pipe));
9218 0 : }
9219 : }
9220 0 : }
9221 :
/*
 * Read back the primary plane's firmware-programmed framebuffer into
 * plane_config (ILK-era display). Returns silently when the plane is
 * disabled or the intel_framebuffer allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling bit only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	/* The offset register depends on platform and tiling; note that
	 * offset is not used after this point. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC holds (width-1, height-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9290 :
/*
 * Read the current hardware state of this pipe into pipe_config.
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled; true when pipe_config was filled from the hardware.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	/* On ILK the CPU transcoder is hardwired 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Decode the bits-per-component field back to a pipe_bpp value. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* An enabled PCH transcoder implies FDI + a shared PCH PLL. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		/* IBX has a fixed pipe->PLL mapping; CPT selects via
		 * PCH_DPLL_SEL. */
		if (HAS_PCH_IBX(dev_priv->dev)) {
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		/* Multiplier field is stored as (mult - 1) in the DPLL. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}
9372 :
/*
 * Sanity-check that nothing in the display engine still depends on the
 * LCPLL before we disable it: all CRTCs inactive, power well off, SPLL
 * and WRPLLs off, panel power and backlight PWMs off, utility pin and
 * PCH GTC off, and IRQs disabled.  Each violation triggers an
 * I915_STATE_WARN rather than aborting.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
			pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9406 :
9407 0 : static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9408 : {
9409 0 : struct drm_device *dev = dev_priv->dev;
9410 :
9411 0 : if (IS_HASWELL(dev))
9412 0 : return I915_READ(D_COMP_HSW);
9413 : else
9414 0 : return I915_READ(D_COMP_BDW);
9415 0 : }
9416 :
/*
 * Write the D_COMP register.  On Haswell the write goes through the
 * pcode mailbox (serialized by rps.hw_lock); on Broadwell D_COMP is a
 * plain MMIO register, posted with a read-back.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9432 :
9433 : /*
9434 : * This function implements pieces of two sequences from BSpec:
9435 : * - Sequence for display software to disable LCPLL
9436 : * - Sequence for display software to allow package C8+
9437 : * The steps implemented here are just the steps that actually touch the LCPLL
9438 : * register. Callers should take care of disabling all the display engine
9439 : * functions, doing the mode unset, fixing interrupts, etc.
9440 : */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock onto Fclk before the PLL goes down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP once the PLL is down, per the sequence above. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9484 :
9485 : /*
9486 : * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9487 : * source.
9488 : */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP before bringing the PLL back up. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch the CD clock back from Fclk to the (now locked) LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv->dev);
}
9536 :
9537 : /*
9538 : * Package states C8 and deeper are really deep PC states that can only be
9539 : * reached when all the devices on the system allow it, so even if the graphics
9540 : * device allows PC8+, it doesn't mean the system will actually get to these
9541 : * states. Our driver only allows PC8+ when going into runtime PM.
9542 : *
9543 : * The requirements for PC8+ are that all the outputs are disabled, the power
9544 : * well is disabled and most interrupts are disabled, and these are also
9545 : * requirements for runtime PM. When these conditions are met, we manually do
9546 : * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9547 : * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9548 : * hang the machine.
9549 : *
9550 : * When we really reach PC8 or deeper states (not just when we allow it) we lose
9551 : * the state of some registers, so when we come back from PC8+ we need to
9552 : * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9553 : * need to take care of the registers kept by RC6. Notice that this happens even
9554 : * if we don't put the device in PCI D3 state (which is what currently happens
9555 : * because of the runtime PM support).
9556 : *
9557 : * For more, read "Display Sequences for Package C8" on the hardware
9558 : * documentation.
9559 : */
/*
 * Allow package C8+ (see the big comment above): on LPT-LP clear the
 * partition level clock gating disable bit, shut off the PCH DP clock
 * output, and disable the LCPLL (switching the CD clock to Fclk and
 * allowing PLL power down).
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
9576 :
/*
 * Disallow package C8+, undoing hsw_enable_pc8(): restore the LCPLL,
 * re-init the PCH reference clock, set the LPT-LP partition level
 * clock gating disable bit again, and re-initialize the DDI buffers.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}
9595 :
9596 0 : static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9597 : {
9598 0 : struct drm_device *dev = old_state->dev;
9599 0 : unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9600 :
9601 0 : broxton_set_cdclk(dev, req_cdclk);
9602 0 : }
9603 :
/*
 * Compute the maximum pipe pixel rate over all enabled CRTCs in the
 * new configuration, used to pick a cdclk frequency.  Returns a
 * negative errno if a CRTC state cannot be acquired.
 */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixel_rate = 0;

	for_each_intel_crtc(state->dev, intel_crtc) {
		int pixel_rate;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		max_pixel_rate = max(max_pixel_rate, pixel_rate);
	}

	return max_pixel_rate;
}
9632 :
/*
 * Change the Broadwell CD clock to @cdclk (kHz).  Sequence: notify
 * pcode of the pending change, temporarily run the CD clock off Fclk,
 * reprogram the LCPLL frequency select, switch back to the LCPLL, and
 * finally report the new frequency index to pcode.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	/* The LCPLL must be fully up before its CD output is retuned. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Run the CD clock off Fclk while the divider is reprogrammed. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* @data is the frequency index reported to pcode afterwards. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back onto the LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9711 :
/*
 * Pick the smallest supported Broadwell cdclk frequency that exceeds
 * the maximum pipe pixel rate of the new configuration, clamp it to
 * the platform maximum, and stash it in the atomic state for the
 * commit phase.
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	if (max_pixclk > 540000)
		cdclk = 675000;
	else if (max_pixclk > 450000)
		cdclk = 540000;
	else if (max_pixclk > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	to_intel_atomic_state(state)->cdclk = cdclk;

	return 0;
}
9745 :
9746 0 : static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9747 : {
9748 0 : struct drm_device *dev = old_state->dev;
9749 0 : unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9750 :
9751 0 : broadwell_set_cdclk(dev, req_cdclk);
9752 0 : }
9753 :
9754 0 : static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9755 : struct intel_crtc_state *crtc_state)
9756 : {
9757 0 : if (!intel_ddi_pll_select(crtc, crtc_state))
9758 0 : return -EINVAL;
9759 :
9760 0 : crtc->lowfreq_avail = false;
9761 :
9762 0 : return 0;
9763 0 : }
9764 :
/*
 * Record in @pipe_config which PLL drives @port on Broxton.  The
 * mapping is fixed per port; an unexpected port only logs an error
 * and leaves @pipe_config untouched.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}
9786 :
/*
 * Read out which DPLL drives @port on Skylake from DPLL_CTRL2 and
 * record it in @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	/* Each port has a 3-bit clock select field in DPLL_CTRL2. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}
9817 :
/*
 * Read out which PLL drives @port on Haswell/Broadwell from the port
 * clock select register and record it in @pipe_config.  An
 * unrecognized selection leaves shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		pipe_config->shared_dpll = DPLL_ID_SPLL;
		/* no break needed: last case in the switch */
	}
}
9835 :
/*
 * Read out the DDI port state for @crtc: which port the transcoder is
 * driving, which (shared) PLL feeds that port, and whether the PCH
 * transcoder/FDI path is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Platform-specific readout of the port -> PLL routing. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		/* The selected PLL should be running for an active port. */
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9879 :
/*
 * Read out the current hardware state of a Haswell+ pipe into
 * @pipe_config: transcoder (including eDP routing), DDI port/PLL
 * state, timings, scalers/panel fitter, IPS and pixel multiplier.
 *
 * Returns false if any required power domain is off or the pipe is
 * disabled, true if the state was read out successfully.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* The eDP transcoder can be routed to any pipe; check for ours. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through: treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	/* The transcoder has its own power domain; re-check before reading. */
	if (!intel_display_power_is_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 9) {
		skl_init_scalers(dev, crtc, pipe_config);
	}

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);

	/* Default to "no scaler in use" before the pfit readout below. */
	if (INTEL_INFO(dev)->gen >= 9) {
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (INTEL_INFO(dev)->gen >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}
9960 :
/*
 * Program the 845G/865G-style cursor.  These chips only allow changing
 * base/size/stride while the cursor is disabled, so the cursor is
 * turned off first whenever any of those change.  Register writes are
 * skipped when the cached values already match.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (on) {
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		/* Only a few power-of-two strides are programmable. */
		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	/*
	 * On these chipsets base/size/stride can only be modified while
	 * the cursor is disabled, so disable it first if they changed.
	 */
	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Re-enable (or finally disable) the cursor last. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10022 :
/*
 * Program the i9xx+ cursor: build the control word (mode from cursor
 * width, pipe select, optional CSC and 180° rotation), write it only
 * if it changed, then write the base register, which latches all
 * cursor changes on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (on) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only square 64/128/256 ARGB cursors are supported. */
		switch (intel_crtc->base.cursor->state->crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10068 :
/*
 * Update the cursor position and visibility for @crtc.  Forces the
 * cursor off when it lies entirely outside the pipe source area (if
 * no part of the cursor is visible on the framebuffer, the GPU may
 * hang), encodes negative coordinates with the sign bits of CURPOS,
 * and dispatches to the platform-specific update function.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_plane_state *cursor_state = crtc->cursor->state;
	int x = cursor_state->crtc_x;
	int y = cursor_state->crtc_y;
	u32 base = 0, pos = 0;

	base = intel_crtc->cursor_addr;

	/* Entirely off the right/bottom edge: keep the cursor disabled. */
	if (x >= intel_crtc->config->pipe_src_w)
		on = false;

	if (y >= intel_crtc->config->pipe_src_h)
		on = false;

	if (x < 0) {
		/* Entirely off the left edge: disable. */
		if (x + cursor_state->crtc_w <= 0)
			on = false;

		/* Negative coordinates use a sign bit + magnitude encoding. */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + cursor_state->crtc_h <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* Point base at the last pixel for the 180° rotated scanout. */
		base += (cursor_state->crtc_h *
			 cursor_state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, on);
	else
		i9xx_update_cursor(crtc, base, on);
}
10122 :
/*
 * Check whether a @width x @height cursor is supported by the
 * hardware.  845G/865G only constrain the width (multiple of 64, up
 * to 64 or 512) and cap height at 1023; everything else needs square
 * power-of-two cursors (64, 128 or 256; the latter two not on gen2).
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width|height equals width only for square cursors. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10159 :
10160 0 : static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10161 : u16 *blue, uint32_t start, uint32_t size)
10162 : {
10163 0 : int end = (start + size > 256) ? 256 : start + size, i;
10164 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10165 :
10166 0 : for (i = start; i < end; i++) {
10167 0 : intel_crtc->lut_r[i] = red[i] >> 8;
10168 0 : intel_crtc->lut_g[i] = green[i] >> 8;
10169 0 : intel_crtc->lut_b[i] = blue[i] >> 8;
10170 : }
10171 :
10172 0 : intel_crtc_load_lut(crtc);
10173 0 : }
10174 :
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection
 * (see intel_get_load_detect_pipe()).
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10180 :
/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Consumes the caller's reference on @obj: on any failure the
 * reference is dropped and an ERR_PTR is returned.  Caller must hold
 * the locks required by intel_framebuffer_init().
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	/* Release both the object reference and the half-built fb. */
	drm_gem_object_unreference(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}
10206 :
/*
 * Locked wrapper around __intel_framebuffer_create(): takes
 * struct_mutex (interruptibly) around the framebuffer setup.
 * Returns the new framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
10223 :
10224 : static u32
10225 0 : intel_framebuffer_pitch_for_width(int width, int bpp)
10226 : {
10227 0 : u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10228 0 : return roundup2(pitch, 64);
10229 : }
10230 :
10231 : static u32
10232 0 : intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10233 : {
10234 0 : u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10235 0 : return PAGE_ALIGN(pitch * mode->vdisplay);
10236 : }
10237 :
/*
 * Allocate a fresh GEM object sized for @mode at @bpp/@depth and wrap
 * it in a framebuffer.  Returns the framebuffer or an ERR_PTR; the
 * object reference is owned by the framebuffer (or released on error)
 * via intel_framebuffer_create().
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
10259 :
/*
 * Return the fbdev framebuffer if it is large enough (in pitch and
 * backing-object size) to display @mode, otherwise NULL.  Always NULL
 * when fbdev emulation is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* Wide enough for the mode at the fb's pixel size? */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* Tall enough given the existing pitch? */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
10291 :
/*
 * Set up the primary plane state for @crtc in the atomic @state:
 * attach @fb (or detach the plane when @fb is NULL) and program a 1:1
 * scan-out of @mode starting at (@x, @y) in the framebuffer.  Returns
 * 0 or a negative errno.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* src_* are 16.16 fixed point, hence the << 16. */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10326 :
/*
 * intel_get_load_detect_pipe - route @connector to a live pipe for probing
 * @connector: connector to perform load detection on
 * @mode: mode to program, or NULL to use the default load_detect_mode
 * @old: bookkeeping handed back to intel_release_load_detect_pipe()
 * @ctx: modeset acquire context; -EDEADLK is handled here via backoff+retry
 *
 * Returns true when the connector is driven by a running CRTC (either its
 * pre-existing one or a temporarily borrowed unused one), false otherwise.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail;

		/* Nothing borrowed: the release path only restores DPMS. */
		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot be routed to. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->state->enable)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	intel_crtc = to_intel_crtc(crtc);
	/* Record that a pipe was borrowed so release can tear it down. */
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return false;

	state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	connector_state->crtc = crtc;
	connector_state->best_encoder = &intel_encoder->base;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	/* NOTE(review): on failure below, old->release_fb appears to leak
	 * (it is only destroyed on the commit-failure path) — confirm. */
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_mode_copy(&crtc_state->base.mode, mode);

	if (drm_atomic_commit(state)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	state = NULL;

	/* Lock contention: drop every lock taken via @ctx and start over. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10492 :
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @connector: connector that was being probed
 * @old: bookkeeping filled in by intel_get_load_detect_pipe()
 * @ctx: the same modeset acquire context used for the grab
 *
 * If a pipe was borrowed (load_detect_temp), commit an atomic state that
 * detaches the connector and disables the crtc/plane, then drop the
 * temporary framebuffer.  Otherwise just restore the original DPMS mode.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		state = drm_atomic_state_alloc(dev);
		if (!state)
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			goto fail;

		/* Detach the connector and shut the borrowed pipe down. */
		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		crtc_state->base.enable = crtc_state->base.active = false;

		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
						      0, 0);
		if (ret)
			goto fail;

		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;

		/* Drop the temporary fb created for load detection. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	drm_atomic_state_free(state);
}
10558 :
10559 0 : static int i9xx_pll_refclk(struct drm_device *dev,
10560 : const struct intel_crtc_state *pipe_config)
10561 : {
10562 0 : struct drm_i915_private *dev_priv = dev->dev_private;
10563 0 : u32 dpll = pipe_config->dpll_hw_state.dpll;
10564 :
10565 0 : if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10566 0 : return dev_priv->vbt.lvds_ssc_freq;
10567 0 : else if (HAS_PCH_SPLIT(dev))
10568 0 : return 120000;
10569 0 : else if (!IS_GEN2(dev))
10570 0 : return 96000;
10571 : else
10572 0 : return 48000;
10573 0 : }
10574 :
10575 : /* Returns the clock of the currently programmed mode of the given pipe. */
10576 0 : static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10577 : struct intel_crtc_state *pipe_config)
10578 : {
10579 0 : struct drm_device *dev = crtc->base.dev;
10580 0 : struct drm_i915_private *dev_priv = dev->dev_private;
10581 0 : int pipe = pipe_config->cpu_transcoder;
10582 0 : u32 dpll = pipe_config->dpll_hw_state.dpll;
10583 : u32 fp;
10584 0 : intel_clock_t clock;
10585 : int port_clock;
10586 0 : int refclk = i9xx_pll_refclk(dev, pipe_config);
10587 :
10588 0 : if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10589 0 : fp = pipe_config->dpll_hw_state.fp0;
10590 : else
10591 0 : fp = pipe_config->dpll_hw_state.fp1;
10592 :
10593 0 : clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10594 0 : if (IS_PINEVIEW(dev)) {
10595 0 : clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10596 0 : clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10597 0 : } else {
10598 0 : clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10599 0 : clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10600 : }
10601 :
10602 0 : if (!IS_GEN2(dev)) {
10603 0 : if (IS_PINEVIEW(dev))
10604 0 : clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10605 : DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10606 : else
10607 0 : clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10608 : DPLL_FPA01_P1_POST_DIV_SHIFT);
10609 :
10610 0 : switch (dpll & DPLL_MODE_MASK) {
10611 : case DPLLB_MODE_DAC_SERIAL:
10612 0 : clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10613 : 5 : 10;
10614 0 : break;
10615 : case DPLLB_MODE_LVDS:
10616 0 : clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10617 : 7 : 14;
10618 0 : break;
10619 : default:
10620 : DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10621 : "mode\n", (int)(dpll & DPLL_MODE_MASK));
10622 0 : return;
10623 : }
10624 :
10625 0 : if (IS_PINEVIEW(dev))
10626 0 : port_clock = pnv_calc_dpll_params(refclk, &clock);
10627 : else
10628 0 : port_clock = i9xx_calc_dpll_params(refclk, &clock);
10629 : } else {
10630 0 : u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10631 0 : bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10632 :
10633 0 : if (is_lvds) {
10634 0 : clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10635 : DPLL_FPA01_P1_POST_DIV_SHIFT);
10636 :
10637 0 : if (lvds & LVDS_CLKB_POWER_UP)
10638 0 : clock.p2 = 7;
10639 : else
10640 0 : clock.p2 = 14;
10641 : } else {
10642 0 : if (dpll & PLL_P1_DIVIDE_BY_TWO)
10643 0 : clock.p1 = 2;
10644 : else {
10645 0 : clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10646 0 : DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10647 : }
10648 0 : if (dpll & PLL_P2_DIVIDE_BY_4)
10649 0 : clock.p2 = 4;
10650 : else
10651 0 : clock.p2 = 2;
10652 : }
10653 :
10654 0 : port_clock = i9xx_calc_dpll_params(refclk, &clock);
10655 : }
10656 :
10657 : /*
10658 : * This value includes pixel_multiplier. We will use
10659 : * port_clock to compute adjusted_mode.crtc_clock in the
10660 : * encoder's get_config() function.
10661 : */
10662 0 : pipe_config->port_clock = port_clock;
10663 0 : }
10664 :
10665 0 : int intel_dotclock_calculate(int link_freq,
10666 : const struct intel_link_m_n *m_n)
10667 : {
10668 : /*
10669 : * The calculation for the data clock is:
10670 : * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10671 : * But we want to avoid losing precison if possible, so:
10672 : * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10673 : *
10674 : * and the link clock is simpler:
10675 : * link_clock = (m * link_clock) / n
10676 : */
10677 :
10678 0 : if (!m_n->link_n)
10679 0 : return 0;
10680 :
10681 0 : return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10682 0 : }
10683 :
/*
 * Derive the PCH-side dotclock for @crtc: read back the DPLL to get
 * port_clock, then scale it by the FDI M/N ratio into crtc_clock.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
10702 :
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the timing and DPLL registers back from hardware and builds a
 * freshly kzalloc'd drm_display_mode from them.  The caller owns the
 * returned mode; returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	/* NOTE(review): pipe_config is stack-allocated and only partially
	 * initialised below; i9xx_crtc_clock_get() must not read any other
	 * field — confirm. */
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* Timing registers hold (value - 1), low/high 16 bits per field. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
10750 :
/*
 * Mark the device busy: take a runtime PM reference, refresh the GPU
 * frequency bookkeeping and (gen6+) kick RPS into its busy state.
 * No-op when already marked busy.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
10764 :
10765 0 : void intel_mark_idle(struct drm_device *dev)
10766 : {
10767 0 : struct drm_i915_private *dev_priv = dev->dev_private;
10768 :
10769 0 : if (!dev_priv->mm.busy)
10770 0 : return;
10771 :
10772 0 : dev_priv->mm.busy = false;
10773 :
10774 0 : if (INTEL_INFO(dev)->gen >= 6)
10775 0 : gen6_rps_idle(dev->dev_private);
10776 :
10777 0 : intel_runtime_pm_put(dev_priv);
10778 0 : }
10779 :
/*
 * Destroy @crtc: detach and cancel any pending page-flip work under the
 * event lock, then release the DRM core state and the intel_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Steal the pending flip work so the irq path can't touch it. */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		/* Wait for the unpin worker to finish before freeing. */
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
10800 :
/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop
 * the references taken when the flip was queued, and signal frontbuffer
 * flip completion.  Runs from the workqueue, so it may take
 * dev->struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	/* Drop the request tracking the flip, if any. */
	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10825 :
/*
 * Complete the pending page flip on @crtc, if any flip is present and
 * has actually reached the INTEL_FLIP_COMPLETE stage.  Safe to call
 * from irq context and from the reset path.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10856 :
10857 0 : void intel_finish_page_flip(struct drm_device *dev, int pipe)
10858 : {
10859 0 : struct drm_i915_private *dev_priv = dev->dev_private;
10860 0 : struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10861 :
10862 0 : do_intel_finish_page_flip(dev, crtc);
10863 0 : }
10864 :
10865 0 : void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10866 : {
10867 0 : struct drm_i915_private *dev_priv = dev->dev_private;
10868 0 : struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10869 :
10870 0 : do_intel_finish_page_flip(dev, crtc);
10871 0 : }
10872 :
10873 : /* Is 'a' after or equal to 'b'? */
10874 0 : static bool g4x_flip_count_after_eq(u32 a, u32 b)
10875 : {
10876 0 : return !((a - b) & 0x80000000);
10877 : }
10878 :
/*
 * Decide whether the pending CS flip on @crtc has actually landed.
 * Returns true if the flip can be considered finished (including the
 * GPU-reset case where lost flips are force-completed).
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* After a GPU reset the flip is declared done unconditionally. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->unpin_work->flip_count);
}
10918 :
/*
 * Advance the pending flip on @plane's crtc from PENDING to COMPLETE
 * once page_flip_finished() confirms it has landed.  Safe from irq
 * context and from the reset path.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	/* inc_not_zero keeps a not-yet-activated flip at zero (spurious irq). */
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10940 :
/*
 * Publish @work as an active flip, with barriers so the irq handler
 * observes a fully-initialised work item.
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
10949 :
/*
 * Queue a CS page flip on gen2 hardware by emitting a WAIT_FOR_EVENT
 * plus MI_DISPLAY_FLIP sequence into @req's ring.  Returns 0 on success
 * or the error from intel_ring_begin().
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
10984 :
/*
 * Queue a CS page flip on gen3 hardware.  Same structure as gen2 but
 * uses the MI_DISPLAY_FLIP_I915 opcode and a trailing NOOP instead of
 * the aux base dword.  Returns 0 or the intel_ring_begin() error.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Wait for the previous flip on this plane before queueing another. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11016 :
/*
 * Queue a CS page flip on gen4 (i965+) hardware.  Only the base address
 * (plus tiling bits) needs reprogramming; the last dword carries the
 * panel-fitter/pipe-source word.  Returns 0 or the ring error.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11055 :
/*
 * Queue a CS page flip on gen6 hardware.  Differs from gen4 in that the
 * tiling bits are packed into the pitch dword rather than the base
 * address.  Returns 0 or the ring error.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11091 :
/*
 * Queue a CS page flip on gen7/gen8 (IVB+) hardware.  On the render
 * ring this additionally unmasks flip-done events in DERRMR and saves
 * the register (LRI + SRM) before the MI_DISPLAY_FLIP; the whole packet
 * must stay within one cacheline.  Returns 0 or a negative error.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* Base packet is 4 dwords; the RCS DERRMR dance adds 6 (8 on gen8). */
	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}
11186 :
11187 0 : static bool use_mmio_flip(struct intel_engine_cs *ring,
11188 : struct drm_i915_gem_object *obj)
11189 : {
11190 : /*
11191 : * This is not being used for older platforms, because
11192 : * non-availability of flip done interrupt forces us to use
11193 : * CS flips. Older platforms derive flip done using some clever
11194 : * tricks involving the flip_pending status bits and vblank irqs.
11195 : * So using MMIO flips there would disrupt this mechanism.
11196 : */
11197 :
11198 0 : if (ring == NULL)
11199 0 : return true;
11200 :
11201 0 : if (INTEL_INFO(ring->dev)->gen < 5)
11202 0 : return false;
11203 :
11204 0 : if (i915.use_mmio_flip < 0)
11205 0 : return false;
11206 0 : else if (i915.use_mmio_flip > 0)
11207 0 : return true;
11208 0 : else if (i915.enable_execlists)
11209 0 : return true;
11210 : else
11211 0 : return ring != i915_gem_request_get_ring(obj->last_write_req);
11212 0 : }
11213 :
/*
 * Perform an MMIO page flip on SKL+ universal planes.
 *
 * Reprograms tiling in PLANE_CTL and the stride in PLANE_STRIDE for the
 * new framebuffer, then writes PLANE_SURF with the flip's GTT offset.
 * Per the comment below, the CTL/STRIDE writes only take effect on the
 * PLANE_SURF write, which makes the three-register update atomic.
 *
 * @intel_crtc: CRTC being flipped
 * @work:       pending flip state carrying the new surface's gtt_offset
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride;

	/* Keep the current control bits, replacing only the tiling field. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	stride = fb->pitches[0] /
		 intel_fb_stride_alignment(dev, fb->modifier[0],
					   fb->pixel_format);

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* Arm the flip; POSTING_READ flushes the write out to hardware. */
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11259 :
/*
 * Perform an MMIO page flip on ILK-era (pre-SKL) primary planes.
 *
 * Updates the tiling bit in DSPCNTR to match the new object, then writes
 * DSPSURF with the flip's GTT offset to arm the flip at the hardware.
 *
 * @intel_crtc: CRTC being flipped
 * @work:       pending flip state carrying the new surface's gtt_offset
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	/* Sync the plane's tiling control bit with the new framebuffer. */
	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	/* Arm the flip; POSTING_READ flushes the write out to hardware. */
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11284 :
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	/*
	 * unpin_work is protected by the event spinlock; snapshot it and
	 * bail if the flip was already completed/cancelled behind us.
	 */
	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	intel_mark_page_flip_active(work);

	/* Bracket the register writes to avoid racing the vblank evasion. */
	intel_pipe_update_start(crtc);

	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, work);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);
}
11312 :
/*
 * Work-queue handler for deferred MMIO flips.
 *
 * If the flip was queued against an outstanding render request, wait for
 * that request to complete (boosting via rps.mmioflips) before touching
 * the plane registers, then perform the flip and free the work item.
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);

	if (mmio_flip->req) {
		/* Wait for rendering to the new fb; WARN on wait failure. */
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		/* Drop the reference taken in intel_queue_mmio_flip(). */
		i915_gem_request_unreference__unlocked(mmio_flip->req);
	}

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}
11329 :
11330 0 : static int intel_queue_mmio_flip(struct drm_device *dev,
11331 : struct drm_crtc *crtc,
11332 : struct drm_framebuffer *fb,
11333 : struct drm_i915_gem_object *obj,
11334 : struct intel_engine_cs *ring,
11335 : uint32_t flags)
11336 : {
11337 : struct intel_mmio_flip *mmio_flip;
11338 :
11339 0 : mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11340 0 : if (mmio_flip == NULL)
11341 0 : return -ENOMEM;
11342 :
11343 0 : mmio_flip->i915 = to_i915(dev);
11344 0 : mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11345 0 : mmio_flip->crtc = to_intel_crtc(crtc);
11346 :
11347 0 : INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11348 0 : schedule_work(&mmio_flip->work);
11349 :
11350 0 : return 0;
11351 0 : }
11352 :
/*
 * Default .queue_flip hook for platforms without a CS flip implementation.
 * Always fails with -ENODEV so callers fall back to another flip path.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
11362 :
/*
 * Heuristically decide whether a queued page flip has stalled (i.e. the
 * hardware completed it but we missed the flip-done interrupt).
 *
 * Must be called with the relevant unpin_work still installed on the crtc.
 * Returns true if the flip should be treated as complete.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	/* Already seen as complete: nothing left to check. */
	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	/* Not yet submitted to the hardware: cannot have stalled. */
	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
		return false;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		/* Rendering still outstanding: the flip cannot fire yet. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		/* Record the first vblank at which the flip became ready. */
		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	/* Give the hardware a grace period of a few vblanks. */
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11404 :
/*
 * Called from the vblank path to detect and recover stuck page flips on
 * @pipe: completes a flip whose interrupt was missed, and RPS-boosts the
 * render request of a flip that has been pending for more than a vblank.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* NOTE(review): interrupt-context assertion disabled in this port. */
	// WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	/* unpin_work is protected by the event spinlock. */
	spin_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		/* Hardware flipped but we missed the interrupt: kick it. */
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	/* Flip still pending after >1 vblank: boost the GPU to unblock it. */
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
11430 :
/*
 * Legacy (non-atomic) page-flip entry point for a CRTC.
 *
 * Validates the new framebuffer against MI_DISPLAY_FLIP restrictions,
 * sets up an intel_unpin_work descriptor, pins the new fb, and queues
 * the flip either via MMIO (deferred work) or via a command-streamer
 * request, choosing the engine per-platform.
 *
 * @crtc:            CRTC to flip
 * @fb:              new framebuffer
 * @event:           optional vblank event to send on completion
 * @page_flip_flags: DRM_MODE_PAGE_FLIP_* flags from userspace
 *
 * Returns 0 on success or a negative errno. On -EIO (terminally wedged
 * GPU) it falls through to the out_hang path, which commits the new fb
 * through the atomic machinery instead, so the display still updates.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* GPU is dead: jump to the atomic fallback commit at the bottom. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: drain the unpin workqueue if we're two flips behind. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine that will execute the CS flip, per platform. */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state,
					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
			if (ret)
				goto cleanup_unpin;
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_fbc_disable_crtc(intel_crtc);
	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* Error unwind: each label undoes one stage of the setup above. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (request)
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	/*
	 * -EIO (or a terminally wedged GPU via the out_hang goto above)
	 * means the flip path is unusable: push the new fb through an
	 * atomic commit instead so the screen still shows it.
	 */
	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		/* Deadlock on modeset locks: back off and retry the commit. */
		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}
11662 :
11663 :
11664 : /**
11665 : * intel_wm_need_update - Check whether watermarks need updating
11666 : * @plane: drm plane
11667 : * @state: new plane state
11668 : *
11669 : * Check current plane state versus the new one to determine whether
11670 : * watermarks need to be recalculated.
11671 : *
11672 : * Returns true or false.
11673 : */
11674 0 : static bool intel_wm_need_update(struct drm_plane *plane,
11675 : struct drm_plane_state *state)
11676 : {
11677 : /* Update watermarks on tiling changes. */
11678 0 : if (!plane->state->fb || !state->fb ||
11679 0 : plane->state->fb->modifier[0] != state->fb->modifier[0] ||
11680 0 : plane->state->rotation != state->rotation)
11681 0 : return true;
11682 :
11683 0 : if (plane->state->crtc_w != state->crtc_w)
11684 0 : return true;
11685 :
11686 0 : return false;
11687 0 : }
11688 :
/*
 * Compute the per-plane bookkeeping flags needed to apply @plane_state
 * as part of the atomic commit described by @crtc_state.
 *
 * Records in intel_crtc->atomic which ancillary operations the commit
 * must perform: watermark updates (pre/post), cxsr disable, vblank
 * waits, IPS/FBC disables and frontbuffer-bit tracking, based on the
 * plane's visibility transitions (turn on / turn off / modified).
 *
 * Returns 0 on success or a negative errno (from the skl scaler setup).
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	/* idx is only consumed by DRM_DEBUG_ATOMIC, hence the DRMDEBUG ifdef. */
#ifdef DRMDEBUG
	int idx = intel_crtc->base.base.id, ret;
#else
	int ret;
#endif
	int i = drm_plane_index(plane);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;

	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	/*
	 * Disabling a plane is always okay; we just need to update
	 * fb tracking in a special way since cleanup_fb() won't
	 * get called by the plane helpers.
	 */
	if (old_plane_state->base.fb && !fb)
		intel_crtc->atomic.disabled_planes |= 1 << i;

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane cannot be visible on a disabled crtc; sanitize + WARN. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	if (!is_crtc_enabled && WARN_ON(visible))
		visible = false;

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		intel_crtc->atomic.update_wm_pre = true;
		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			intel_crtc->atomic.disable_cxsr = true;
			/* to potentially re-enable cxsr */
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_wm_post = true;
		}
	} else if (turn_off) {
		intel_crtc->atomic.update_wm_post = true;
		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			if (is_crtc_enabled)
				intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.disable_cxsr = true;
		}
	} else if (intel_wm_need_update(plane, plane_state)) {
		intel_crtc->atomic.update_wm_pre = true;
	}

	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	/* Plane-type specific extra work. */
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.wait_for_flips = true;
		intel_crtc->atomic.pre_disable_primary = turn_off;
		intel_crtc->atomic.post_enable_primary = turn_on;

		if (turn_off) {
			/*
			 * FIXME: Actually if we will still have any other
			 * plane enabled on the pipe we could let IPS enabled
			 * still, but for now lets consider that when we make
			 * primary invisible by setting DSPCNTR to 0 on
			 * update_primary_plane function IPS needs to be
			 * disable.
			 */
			intel_crtc->atomic.disable_ips = true;

			intel_crtc->atomic.disable_fbc = true;
		}

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */

		if (visible &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.crtc == intel_crtc &&
		    plane_state->rotation != BIT(DRM_ROTATE_0))
			intel_crtc->atomic.disable_fbc = true;

		/*
		 * BDW signals flip done immediately if the plane
		 * is disabled, even if the plane enable is already
		 * armed to occur at the next vblank :(
		 */
		if (turn_on && IS_BROADWELL(dev))
			intel_crtc->atomic.wait_vblank = true;

		intel_crtc->atomic.update_fbc |= visible || mode_changed;
		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		if (turn_off && !mode_changed) {
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_sprite_watermarks |=
				1 << i;
		}
	}
	return 0;
}
11834 :
11835 0 : static bool encoders_cloneable(const struct intel_encoder *a,
11836 : const struct intel_encoder *b)
11837 : {
11838 : /* masks could be asymmetric, so check both ways */
11839 0 : return a == b || (a->cloneable & (1 << b->type) &&
11840 0 : b->cloneable & (1 << a->type));
11841 : }
11842 :
11843 0 : static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11844 : struct intel_crtc *crtc,
11845 : struct intel_encoder *encoder)
11846 : {
11847 : struct intel_encoder *source_encoder;
11848 : struct drm_connector *connector;
11849 : struct drm_connector_state *connector_state;
11850 : int i;
11851 :
11852 0 : for_each_connector_in_state(state, connector, connector_state, i) {
11853 0 : if (connector_state->crtc != &crtc->base)
11854 : continue;
11855 :
11856 : source_encoder =
11857 0 : to_intel_encoder(connector_state->best_encoder);
11858 0 : if (!encoders_cloneable(encoder, source_encoder))
11859 0 : return false;
11860 : }
11861 :
11862 0 : return true;
11863 0 : }
11864 :
11865 0 : static bool check_encoder_cloning(struct drm_atomic_state *state,
11866 : struct intel_crtc *crtc)
11867 : {
11868 : struct intel_encoder *encoder;
11869 : struct drm_connector *connector;
11870 : struct drm_connector_state *connector_state;
11871 : int i;
11872 :
11873 0 : for_each_connector_in_state(state, connector, connector_state, i) {
11874 0 : if (connector_state->crtc != &crtc->base)
11875 : continue;
11876 :
11877 0 : encoder = to_intel_encoder(connector_state->best_encoder);
11878 0 : if (!check_single_encoder_cloning(state, crtc, encoder))
11879 0 : return false;
11880 : }
11881 :
11882 0 : return true;
11883 0 : }
11884 :
/*
 * .atomic_check hook for intel crtcs.
 *
 * Rejects invalid encoder cloning, flags a post-commit watermark update
 * when the crtc is being turned off, recomputes the pixel clock/PLL
 * state on modesets, and (gen9+) sets up the pipe scalers.
 *
 * Returns 0 on success or a negative errno to fail the atomic check.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* Crtc turning off: watermarks must be refreshed after commit. */
	if (mode_changed && !crtc_state->active)
		intel_crtc->atomic.update_wm_post = true;

	/*
	 * Recompute clocks on a modeset, but only if no shared DPLL has
	 * been assigned yet (WARN otherwise).
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11926 :
/* CRTC helper vtable wiring the intel implementations into the DRM core. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11934 :
11935 0 : static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11936 : {
11937 : struct intel_connector *connector;
11938 :
11939 0 : for_each_intel_connector(dev, connector) {
11940 0 : if (connector->base.encoder) {
11941 0 : connector->base.state->best_encoder =
11942 : connector->base.encoder;
11943 0 : connector->base.state->crtc =
11944 0 : connector->base.encoder->crtc;
11945 0 : } else {
11946 0 : connector->base.state->best_encoder = NULL;
11947 0 : connector->base.state->crtc = NULL;
11948 : }
11949 : }
11950 0 : }
11951 :
11952 : static void
11953 0 : connected_sink_compute_bpp(struct intel_connector *connector,
11954 : struct intel_crtc_state *pipe_config)
11955 : {
11956 0 : int bpp = pipe_config->pipe_bpp;
11957 :
11958 : DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
11959 : connector->base.base.id,
11960 : connector->base.name);
11961 :
11962 : /* Don't use an invalid EDID bpc value */
11963 0 : if (connector->base.display_info.bpc &&
11964 0 : connector->base.display_info.bpc * 3 < bpp) {
11965 : DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
11966 : bpp, connector->base.display_info.bpc*3);
11967 0 : pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
11968 0 : }
11969 :
11970 : /* Clamp bpp to 8 on screens without EDID 1.4 */
11971 0 : if (connector->base.display_info.bpc == 0 && bpp > 24) {
11972 : DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
11973 : bpp);
11974 0 : pipe_config->pipe_bpp = 24;
11975 0 : }
11976 0 : }
11977 :
11978 : static int
11979 0 : compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11980 : struct intel_crtc_state *pipe_config)
11981 : {
11982 0 : struct drm_device *dev = crtc->base.dev;
11983 : struct drm_atomic_state *state;
11984 : struct drm_connector *connector;
11985 : struct drm_connector_state *connector_state;
11986 : int bpp, i;
11987 :
11988 0 : if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
11989 0 : bpp = 10*3;
11990 0 : else if (INTEL_INFO(dev)->gen >= 5)
11991 0 : bpp = 12*3;
11992 : else
11993 : bpp = 8*3;
11994 :
11995 :
11996 0 : pipe_config->pipe_bpp = bpp;
11997 :
11998 0 : state = pipe_config->base.state;
11999 :
12000 : /* Clamp display bpp to EDID value */
12001 0 : for_each_connector_in_state(state, connector, connector_state, i) {
12002 0 : if (connector_state->crtc != &crtc->base)
12003 : continue;
12004 :
12005 0 : connected_sink_compute_bpp(to_intel_connector(connector),
12006 : pipe_config);
12007 0 : }
12008 :
12009 0 : return bpp;
12010 : }
12011 :
/* Debug helper: dump the crtc_* (hardware) timing fields of @mode. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12022 :
12023 0 : static void intel_dump_pipe_config(struct intel_crtc *crtc,
12024 : struct intel_crtc_state *pipe_config,
12025 : const char *context)
12026 : {
12027 0 : struct drm_device *dev = crtc->base.dev;
12028 : struct drm_plane *plane;
12029 : struct intel_plane *intel_plane;
12030 : struct intel_plane_state *state;
12031 : struct drm_framebuffer *fb;
12032 :
12033 : DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12034 : context, pipe_config, pipe_name(crtc->pipe));
12035 :
12036 : DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12037 : DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12038 : pipe_config->pipe_bpp, pipe_config->dither);
12039 : DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12040 : pipe_config->has_pch_encoder,
12041 : pipe_config->fdi_lanes,
12042 : pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12043 : pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12044 : pipe_config->fdi_m_n.tu);
12045 : DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12046 : pipe_config->has_dp_encoder,
12047 : pipe_config->lane_count,
12048 : pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12049 : pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12050 : pipe_config->dp_m_n.tu);
12051 :
12052 : DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12053 : pipe_config->has_dp_encoder,
12054 : pipe_config->lane_count,
12055 : pipe_config->dp_m2_n2.gmch_m,
12056 : pipe_config->dp_m2_n2.gmch_n,
12057 : pipe_config->dp_m2_n2.link_m,
12058 : pipe_config->dp_m2_n2.link_n,
12059 : pipe_config->dp_m2_n2.tu);
12060 :
12061 : DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12062 : pipe_config->has_audio,
12063 : pipe_config->has_infoframe);
12064 :
12065 : DRM_DEBUG_KMS("requested mode:\n");
12066 0 : drm_mode_debug_printmodeline(&pipe_config->base.mode);
12067 : DRM_DEBUG_KMS("adjusted mode:\n");
12068 0 : drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12069 0 : intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12070 : DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12071 : DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12072 : pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12073 : DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12074 : crtc->num_scalers,
12075 : pipe_config->scaler_state.scaler_users,
12076 : pipe_config->scaler_state.scaler_id);
12077 : DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12078 : pipe_config->gmch_pfit.control,
12079 : pipe_config->gmch_pfit.pgm_ratios,
12080 : pipe_config->gmch_pfit.lvds_border_bits);
12081 : DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12082 : pipe_config->pch_pfit.pos,
12083 : pipe_config->pch_pfit.size,
12084 : pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12085 : DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12086 : DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12087 :
12088 0 : if (IS_BROXTON(dev)) {
12089 : DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12090 : "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12091 : "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12092 : pipe_config->ddi_pll_sel,
12093 : pipe_config->dpll_hw_state.ebb0,
12094 : pipe_config->dpll_hw_state.ebb4,
12095 : pipe_config->dpll_hw_state.pll0,
12096 : pipe_config->dpll_hw_state.pll1,
12097 : pipe_config->dpll_hw_state.pll2,
12098 : pipe_config->dpll_hw_state.pll3,
12099 : pipe_config->dpll_hw_state.pll6,
12100 : pipe_config->dpll_hw_state.pll8,
12101 : pipe_config->dpll_hw_state.pll9,
12102 : pipe_config->dpll_hw_state.pll10,
12103 : pipe_config->dpll_hw_state.pcsdw12);
12104 0 : } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12105 : DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12106 : "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12107 : pipe_config->ddi_pll_sel,
12108 : pipe_config->dpll_hw_state.ctrl1,
12109 : pipe_config->dpll_hw_state.cfgcr1,
12110 : pipe_config->dpll_hw_state.cfgcr2);
12111 0 : } else if (HAS_DDI(dev)) {
12112 : DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12113 : pipe_config->ddi_pll_sel,
12114 : pipe_config->dpll_hw_state.wrpll,
12115 : pipe_config->dpll_hw_state.spll);
12116 : } else {
12117 : DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12118 : "fp0: 0x%x, fp1: 0x%x\n",
12119 : pipe_config->dpll_hw_state.dpll,
12120 : pipe_config->dpll_hw_state.dpll_md,
12121 : pipe_config->dpll_hw_state.fp0,
12122 : pipe_config->dpll_hw_state.fp1);
12123 : }
12124 :
12125 : DRM_DEBUG_KMS("planes on this crtc\n");
12126 0 : list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12127 0 : intel_plane = to_intel_plane(plane);
12128 0 : if (intel_plane->pipe != crtc->pipe)
12129 : continue;
12130 :
12131 0 : state = to_intel_plane_state(plane->state);
12132 0 : fb = state->base.fb;
12133 0 : if (!fb) {
12134 : DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12135 : "disabled, scaler_id = %d\n",
12136 : plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12137 : plane->base.id, intel_plane->pipe,
12138 : (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12139 : drm_plane_index(plane), state->scaler_id);
12140 : continue;
12141 : }
12142 :
12143 : DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12144 : plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12145 : plane->base.id, intel_plane->pipe,
12146 : crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12147 : drm_plane_index(plane));
12148 : DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12149 : fb->base.id, fb->width, fb->height, fb->pixel_format);
12150 : DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12151 : state->scaler_id,
12152 : state->src.x1 >> 16, state->src.y1 >> 16,
12153 : drm_rect_width(&state->src) >> 16,
12154 : drm_rect_height(&state->src) >> 16,
12155 : state->dst.x1, state->dst.y1,
12156 : drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12157 : }
12158 0 : }
12159 :
/*
 * Verify that no two enabled connectors in the atomic state claim the
 * same digital port.  Returns false when a conflict is found, true
 * otherwise.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;	/* bitmask of digital ports seen so far */

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the state being committed; fall back to the
		 * connector's currently committed state. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		/* Connector not driven by any encoder: nothing to check. */
		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		/* An enabled connector should always have a crtc assigned. */
		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			/* UNKNOWN encoders are only expected on DDI
			 * platforms; elsewhere they carry no port. */
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: on DDI, UNKNOWN occupies a port too */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}
12208 :
12209 : static void
12210 0 : clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12211 : {
12212 0 : struct drm_crtc_state tmp_state;
12213 0 : struct intel_crtc_scaler_state scaler_state;
12214 0 : struct intel_dpll_hw_state dpll_hw_state;
12215 : enum intel_dpll_id shared_dpll;
12216 : uint32_t ddi_pll_sel;
12217 : bool force_thru;
12218 :
12219 : /* FIXME: before the switch to atomic started, a new pipe_config was
12220 : * kzalloc'd. Code that depends on any field being zero should be
12221 : * fixed, so that the crtc_state can be safely duplicated. For now,
12222 : * only fields that are know to not cause problems are preserved. */
12223 :
12224 0 : tmp_state = crtc_state->base;
12225 0 : scaler_state = crtc_state->scaler_state;
12226 0 : shared_dpll = crtc_state->shared_dpll;
12227 0 : dpll_hw_state = crtc_state->dpll_hw_state;
12228 0 : ddi_pll_sel = crtc_state->ddi_pll_sel;
12229 0 : force_thru = crtc_state->pch_pfit.force_thru;
12230 :
12231 0 : memset(crtc_state, 0, sizeof *crtc_state);
12232 :
12233 0 : crtc_state->base = tmp_state;
12234 0 : crtc_state->scaler_state = scaler_state;
12235 0 : crtc_state->shared_dpll = shared_dpll;
12236 0 : crtc_state->dpll_hw_state = dpll_hw_state;
12237 0 : crtc_state->ddi_pll_sel = ddi_pll_sel;
12238 0 : crtc_state->pch_pfit.force_thru = force_thru;
12239 0 : }
12240 :
/*
 * Compute the full pipe configuration for @crtc: clear stale state,
 * give every encoder feeding the crtc a chance to adjust the mode,
 * then run the crtc-level fixup.  A single retry is allowed when the
 * crtc fixup asks for it (RETRY).  Returns 0 on success or a negative
 * error code.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* one RETRY round permitted, then give up */

	clear_intel_crtc_state(pipe_config);

	/* Default transcoder is the one matching the pipe number. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;	/* ret is still -EINVAL here */

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		/* Only encoders driving this crtc get a say. */
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* A positive RETRY return means the crtc wants the whole encoder
	 * round re-run once (e.g. bandwidth-constrained bpp reduction). */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
12345 :
12346 : static void
12347 0 : intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12348 : {
12349 : struct drm_crtc *crtc;
12350 : struct drm_crtc_state *crtc_state;
12351 : int i;
12352 :
12353 : /* Double check state. */
12354 0 : for_each_crtc_in_state(state, crtc, crtc_state, i) {
12355 0 : to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12356 :
12357 : /* Update hwmode for vblank functions */
12358 0 : if (crtc->state->active)
12359 0 : crtc->hwmode = crtc->state->adjusted_mode;
12360 : else
12361 0 : crtc->hwmode.crtc_clock = 0;
12362 : }
12363 0 : }
12364 :
/*
 * Compare two clocks (in kHz) allowing a small tolerance.
 *
 * Identical clocks always match; a zero clock never fuzzily matches a
 * non-zero one.  Otherwise the pair matches when the difference is
 * below 5% of the combined clocks, i.e.
 * (diff + c1 + c2) * 100 / (c1 + c2) < 105.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	return (diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
12382 :
/*
 * Iterate over every intel_crtc on @dev whose pipe bit is set in
 * @mask.  Wraps list_for_each_entry, so the usual list-iteration
 * restrictions apply (no removal of the current entry).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
12388 :
12389 : static bool
12390 0 : intel_compare_m_n(unsigned int m, unsigned int n,
12391 : unsigned int m2, unsigned int n2,
12392 : bool exact)
12393 : {
12394 0 : if (m == m2 && n == n2)
12395 0 : return true;
12396 :
12397 0 : if (exact || !m || !n || !m2 || !n2)
12398 0 : return false;
12399 :
12400 : BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12401 :
12402 0 : if (m > m2) {
12403 0 : while (m > m2) {
12404 0 : m2 <<= 1;
12405 0 : n2 <<= 1;
12406 : }
12407 0 : } else if (m < m2) {
12408 0 : while (m < m2) {
12409 0 : m <<= 1;
12410 0 : n <<= 1;
12411 : }
12412 : }
12413 :
12414 0 : return m == m2 && n == n2;
12415 0 : }
12416 :
12417 : static bool
12418 0 : intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12419 : struct intel_link_m_n *m2_n2,
12420 : bool adjust)
12421 : {
12422 0 : if (m_n->tu == m2_n2->tu &&
12423 0 : intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12424 0 : m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12425 0 : intel_compare_m_n(m_n->link_m, m_n->link_n,
12426 0 : m2_n2->link_m, m2_n2->link_n, !adjust)) {
12427 0 : if (adjust)
12428 0 : *m2_n2 = *m_n;
12429 :
12430 0 : return true;
12431 : }
12432 :
12433 0 : return false;
12434 0 : }
12435 :
12436 : static bool
12437 0 : intel_pipe_config_compare(struct drm_device *dev,
12438 : struct intel_crtc_state *current_config,
12439 : struct intel_crtc_state *pipe_config,
12440 : bool adjust)
12441 : {
12442 : bool ret = true;
12443 :
12444 : #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12445 : do { \
12446 : if (!adjust) \
12447 : DRM_ERROR(fmt, ##__VA_ARGS__); \
12448 : else \
12449 : DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12450 : } while (0)
12451 :
12452 : #define PIPE_CONF_CHECK_X(name) \
12453 : if (current_config->name != pipe_config->name) { \
12454 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12455 : "(expected 0x%08x, found 0x%08x)\n", \
12456 : current_config->name, \
12457 : pipe_config->name); \
12458 : ret = false; \
12459 : }
12460 :
12461 : #define PIPE_CONF_CHECK_I(name) \
12462 : if (current_config->name != pipe_config->name) { \
12463 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12464 : "(expected %i, found %i)\n", \
12465 : current_config->name, \
12466 : pipe_config->name); \
12467 : ret = false; \
12468 : }
12469 :
12470 : #define PIPE_CONF_CHECK_M_N(name) \
12471 : if (!intel_compare_link_m_n(¤t_config->name, \
12472 : &pipe_config->name,\
12473 : adjust)) { \
12474 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12475 : "(expected tu %i gmch %i/%i link %i/%i, " \
12476 : "found tu %i, gmch %i/%i link %i/%i)\n", \
12477 : current_config->name.tu, \
12478 : current_config->name.gmch_m, \
12479 : current_config->name.gmch_n, \
12480 : current_config->name.link_m, \
12481 : current_config->name.link_n, \
12482 : pipe_config->name.tu, \
12483 : pipe_config->name.gmch_m, \
12484 : pipe_config->name.gmch_n, \
12485 : pipe_config->name.link_m, \
12486 : pipe_config->name.link_n); \
12487 : ret = false; \
12488 : }
12489 :
12490 : #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12491 : if (!intel_compare_link_m_n(¤t_config->name, \
12492 : &pipe_config->name, adjust) && \
12493 : !intel_compare_link_m_n(¤t_config->alt_name, \
12494 : &pipe_config->name, adjust)) { \
12495 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12496 : "(expected tu %i gmch %i/%i link %i/%i, " \
12497 : "or tu %i gmch %i/%i link %i/%i, " \
12498 : "found tu %i, gmch %i/%i link %i/%i)\n", \
12499 : current_config->name.tu, \
12500 : current_config->name.gmch_m, \
12501 : current_config->name.gmch_n, \
12502 : current_config->name.link_m, \
12503 : current_config->name.link_n, \
12504 : current_config->alt_name.tu, \
12505 : current_config->alt_name.gmch_m, \
12506 : current_config->alt_name.gmch_n, \
12507 : current_config->alt_name.link_m, \
12508 : current_config->alt_name.link_n, \
12509 : pipe_config->name.tu, \
12510 : pipe_config->name.gmch_m, \
12511 : pipe_config->name.gmch_n, \
12512 : pipe_config->name.link_m, \
12513 : pipe_config->name.link_n); \
12514 : ret = false; \
12515 : }
12516 :
12517 : /* This is required for BDW+ where there is only one set of registers for
12518 : * switching between high and low RR.
12519 : * This macro can be used whenever a comparison has to be made between one
12520 : * hw state and multiple sw state variables.
12521 : */
12522 : #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12523 : if ((current_config->name != pipe_config->name) && \
12524 : (current_config->alt_name != pipe_config->name)) { \
12525 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12526 : "(expected %i or %i, found %i)\n", \
12527 : current_config->name, \
12528 : current_config->alt_name, \
12529 : pipe_config->name); \
12530 : ret = false; \
12531 : }
12532 :
12533 : #define PIPE_CONF_CHECK_FLAGS(name, mask) \
12534 : if ((current_config->name ^ pipe_config->name) & (mask)) { \
12535 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12536 : "(expected %i, found %i)\n", \
12537 : current_config->name & (mask), \
12538 : pipe_config->name & (mask)); \
12539 : ret = false; \
12540 : }
12541 :
12542 : #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12543 : if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12544 : INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12545 : "(expected %i, found %i)\n", \
12546 : current_config->name, \
12547 : pipe_config->name); \
12548 : ret = false; \
12549 : }
12550 :
12551 : #define PIPE_CONF_QUIRK(quirk) \
12552 : ((current_config->quirks | pipe_config->quirks) & (quirk))
12553 :
12554 0 : PIPE_CONF_CHECK_I(cpu_transcoder);
12555 :
12556 0 : PIPE_CONF_CHECK_I(has_pch_encoder);
12557 0 : PIPE_CONF_CHECK_I(fdi_lanes);
12558 0 : PIPE_CONF_CHECK_M_N(fdi_m_n);
12559 :
12560 0 : PIPE_CONF_CHECK_I(has_dp_encoder);
12561 0 : PIPE_CONF_CHECK_I(lane_count);
12562 :
12563 0 : if (INTEL_INFO(dev)->gen < 8) {
12564 0 : PIPE_CONF_CHECK_M_N(dp_m_n);
12565 :
12566 0 : if (current_config->has_drrs)
12567 0 : PIPE_CONF_CHECK_M_N(dp_m2_n2);
12568 : } else
12569 0 : PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12570 :
12571 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12572 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12573 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12574 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12575 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12576 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12577 :
12578 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12579 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12580 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12581 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12582 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12583 0 : PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12584 :
12585 0 : PIPE_CONF_CHECK_I(pixel_multiplier);
12586 0 : PIPE_CONF_CHECK_I(has_hdmi_sink);
12587 0 : if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12588 0 : IS_VALLEYVIEW(dev))
12589 0 : PIPE_CONF_CHECK_I(limited_color_range);
12590 0 : PIPE_CONF_CHECK_I(has_infoframe);
12591 :
12592 : #ifdef notyet
12593 : PIPE_CONF_CHECK_I(has_audio);
12594 : #endif
12595 :
12596 0 : PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12597 : DRM_MODE_FLAG_INTERLACE);
12598 :
12599 0 : if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12600 0 : PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12601 : DRM_MODE_FLAG_PHSYNC);
12602 0 : PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12603 : DRM_MODE_FLAG_NHSYNC);
12604 0 : PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12605 : DRM_MODE_FLAG_PVSYNC);
12606 0 : PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12607 : DRM_MODE_FLAG_NVSYNC);
12608 : }
12609 :
12610 0 : PIPE_CONF_CHECK_X(gmch_pfit.control);
12611 : /* pfit ratios are autocomputed by the hw on gen4+ */
12612 0 : if (INTEL_INFO(dev)->gen < 4)
12613 0 : PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12614 0 : PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12615 :
12616 0 : if (!adjust) {
12617 0 : PIPE_CONF_CHECK_I(pipe_src_w);
12618 0 : PIPE_CONF_CHECK_I(pipe_src_h);
12619 :
12620 0 : PIPE_CONF_CHECK_I(pch_pfit.enabled);
12621 0 : if (current_config->pch_pfit.enabled) {
12622 0 : PIPE_CONF_CHECK_X(pch_pfit.pos);
12623 0 : PIPE_CONF_CHECK_X(pch_pfit.size);
12624 : }
12625 :
12626 0 : PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12627 : }
12628 :
12629 : /* BDW+ don't expose a synchronous way to read the state */
12630 0 : if (IS_HASWELL(dev))
12631 0 : PIPE_CONF_CHECK_I(ips_enabled);
12632 :
12633 0 : PIPE_CONF_CHECK_I(double_wide);
12634 :
12635 0 : PIPE_CONF_CHECK_X(ddi_pll_sel);
12636 :
12637 0 : PIPE_CONF_CHECK_I(shared_dpll);
12638 0 : PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12639 0 : PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12640 0 : PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12641 0 : PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12642 0 : PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12643 0 : PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12644 0 : PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12645 0 : PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12646 0 : PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12647 :
12648 0 : if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12649 0 : PIPE_CONF_CHECK_I(pipe_bpp);
12650 :
12651 0 : PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12652 0 : PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12653 :
12654 : #undef PIPE_CONF_CHECK_X
12655 : #undef PIPE_CONF_CHECK_I
12656 : #undef PIPE_CONF_CHECK_I_ALT
12657 : #undef PIPE_CONF_CHECK_FLAGS
12658 : #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12659 : #undef PIPE_CONF_QUIRK
12660 : #undef INTEL_ERR_OR_DBG_KMS
12661 :
12662 0 : return ret;
12663 : }
12664 :
/*
 * Cross-check the SKL+ DDB (display data buffer) allocation read back
 * from hardware against the software copy in dev_priv->wm.skl_hw,
 * reporting any per-plane or cursor mismatch.  No-op on gen < 9.
 */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* The DDB only exists on gen9+ hardware. */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12714 :
/*
 * Verify that every connector touched by the commit ended up with a
 * consistent state: its hw state matches sw tracking and its atomic
 * best_encoder agrees with the legacy encoder pointer.
 */
static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
12735 :
/*
 * Verify encoder state: each encoder's enabled/crtc tracking must
 * match what its connectors claim, and a detached encoder must be
 * disabled in hardware.
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;	/* any connector routed to this encoder? */
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			/* Connector and encoder must agree on the crtc. */
			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* An encoder without a crtc must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12775 :
/*
 * Verify crtc state after a commit: read the pipe config back from
 * hardware into the (no longer needed) old state and compare it with
 * the committed software state.
 *
 * NOTE(review): the old_crtc_state is destroyed and reused in place as
 * scratch for the hw readback — it must not be used afterwards.
 */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		/* Only crtcs that were actually reprogrammed are checked. */
		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* Free the old state's internals, then recycle its memory
		 * as the destination for the hardware readback. */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			/* Let active encoders fill in their part of the
			 * hw-readback config. */
			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
12849 :
/*
 * Verify shared DPLL bookkeeping: for every shared PLL, the active/on
 * flags, the crtc reference counts and the cached hw state must all
 * agree with what the hardware and the crtcs report.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		/* Read the actual hardware state of this PLL. */
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount which crtcs reference / actively use this PLL. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
12898 :
/*
 * Run the full post-commit state cross-check suite: watermarks,
 * connectors, encoders, crtcs and shared DPLLs.
 */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}
12909 :
/*
 * Cross-check the dotclock an encoder computed against the value the
 * FDI configuration already produced; warn (fuzzily, to tolerate
 * rounding) when they disagree.
 */
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
12921 :
12922 0 : static void update_scanline_offset(struct intel_crtc *crtc)
12923 : {
12924 0 : struct drm_device *dev = crtc->base.dev;
12925 :
12926 : /*
12927 : * The scanline counter increments at the leading edge of hsync.
12928 : *
12929 : * On most platforms it starts counting from vtotal-1 on the
12930 : * first active line. That means the scanline counter value is
12931 : * always one less than what we would expect. Ie. just after
12932 : * start of vblank, which also occurs at start of hsync (on the
12933 : * last active line), the scanline counter will read vblank_start-1.
12934 : *
12935 : * On gen2 the scanline counter starts counting from 1 instead
12936 : * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12937 : * to keep the value positive), instead of adding one.
12938 : *
12939 : * On HSW+ the behaviour of the scanline counter depends on the output
12940 : * type. For DP ports it behaves like most other platforms, but on HDMI
12941 : * there's an extra 1 line difference. So we need to add two instead of
12942 : * one to the value.
12943 : */
12944 0 : if (IS_GEN2(dev)) {
12945 0 : const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12946 : int vtotal;
12947 :
12948 0 : vtotal = adjusted_mode->crtc_vtotal;
12949 0 : if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12950 0 : vtotal /= 2;
12951 :
12952 0 : crtc->scanline_offset = vtotal - 1;
12953 0 : } else if (HAS_DDI(dev) &&
12954 0 : intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
12955 0 : crtc->scanline_offset = 2;
12956 0 : } else
12957 0 : crtc->scanline_offset = 1;
12958 0 : }
12959 :
12960 0 : static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12961 : {
12962 0 : struct drm_device *dev = state->dev;
12963 0 : struct drm_i915_private *dev_priv = to_i915(dev);
12964 : struct intel_shared_dpll_config *shared_dpll = NULL;
12965 : struct intel_crtc *intel_crtc;
12966 : struct intel_crtc_state *intel_crtc_state;
12967 : struct drm_crtc *crtc;
12968 : struct drm_crtc_state *crtc_state;
12969 : int i;
12970 :
12971 0 : if (!dev_priv->display.crtc_compute_clock)
12972 0 : return;
12973 :
12974 0 : for_each_crtc_in_state(state, crtc, crtc_state, i) {
12975 : int dpll;
12976 :
12977 0 : intel_crtc = to_intel_crtc(crtc);
12978 0 : intel_crtc_state = to_intel_crtc_state(crtc_state);
12979 0 : dpll = intel_crtc_state->shared_dpll;
12980 :
12981 0 : if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
12982 0 : continue;
12983 :
12984 0 : intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
12985 :
12986 0 : if (!shared_dpll)
12987 0 : shared_dpll = intel_atomic_get_shared_dpll_state(state);
12988 :
12989 0 : shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
12990 0 : }
12991 0 : }
12992 :
12993 : /*
12994 : * This implements the workaround described in the "notes" section of the mode
12995 : * set sequence documentation. When going from no pipes or single pipe to
12996 : * multiple pipes, and planes are enabled after the pipe, we need to wait at
12997 : * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12998 : */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/*
		 * Track the first newly-enabled crtc; seeing a second one
		 * means two pipes come up within this very commit.
		 */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* This pulls every crtc into the atomic state; may fail. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		/* Default: no pipe to wait on. */
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/*
	 * One pipe already running: the first new pipe waits on it.
	 * No pipe running but two coming up: the second waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13057 :
13058 0 : static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13059 : {
13060 : struct drm_crtc *crtc;
13061 : struct drm_crtc_state *crtc_state;
13062 : int ret = 0;
13063 :
13064 : /* add all active pipes to the state */
13065 0 : for_each_crtc(state->dev, crtc) {
13066 0 : crtc_state = drm_atomic_get_crtc_state(state, crtc);
13067 0 : if (IS_ERR(crtc_state))
13068 0 : return PTR_ERR(crtc_state);
13069 :
13070 0 : if (!crtc_state->active || needs_modeset(crtc_state))
13071 : continue;
13072 :
13073 0 : crtc_state->mode_changed = true;
13074 :
13075 0 : ret = drm_atomic_add_affected_connectors(state, crtc);
13076 0 : if (ret)
13077 : break;
13078 :
13079 0 : ret = drm_atomic_add_affected_planes(state, crtc);
13080 0 : if (ret)
13081 : break;
13082 : }
13083 :
13084 0 : return ret;
13085 0 : }
13086 :
/*
 * Global (non-per-crtc) validation for a state that contains at least one
 * full modeset: digital port conflicts, cdclk recomputation, shared DPLL
 * release and the HSW multi-pipe workaround.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		unsigned int cdclk;

		ret = dev_priv->display.modeset_calc_cdclk(state);

		/* A cdclk change affects every pipe: force modeset on all. */
		cdclk = to_intel_atomic_state(state)->cdclk;
		if (!ret && cdclk != dev_priv->cdclk_freq)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13126 :
13127 : /**
13128 : * intel_atomic_check - validate state object
13129 : * @dev: drm device
13130 : * @state: state to validate
13131 : */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;	/* does any crtc need a full modeset? */

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Scratch area for this commit; cleared at every check. */
		memset(&to_intel_crtc(crtc)->atomic, 0,
		       sizeof(struct intel_crtc_atomic_commit));

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		/* A crtc being disabled needs no config recomputation. */
		if (!crtc_state->enable) {
			if (needs_modeset(crtc_state))
				any_ms = true;
			continue;
		}

		if (!needs_modeset(crtc_state))
			continue;

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret)
			return ret;

		/*
		 * Fastboot: if the newly computed config is fuzzy-equal to
		 * the current one, downgrade the modeset to a fastset.
		 */
		if (i915.fastboot &&
		    intel_pipe_config_compare(state->dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state)) {
			any_ms = true;

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		/* No modeset: cdclk stays at its current frequency. */
		to_intel_atomic_state(state)->cdclk =
			to_i915(state->dev)->cdclk_freq;

	return drm_atomic_helper_check_planes(state->dev, state);
}
13207 :
13208 : /**
13209 : * intel_atomic_commit - commit validated state object
13210 : * @dev: DRM device
13211 : * @state: the top-level driver state object
13212 : * @async: asynchronous commit
13213 : *
13214 : * This function commits a top-level state object that has been validated
13215 : * with drm_atomic_helper_check().
13216 : *
13217 : * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13218 : * we can only handle plane-related operations and do not yet support
13219 : * asynchronous commit.
13220 : *
13221 : * RETURNS
13222 : * Zero for success or -errno.
13223 : */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;
	bool any_ms = false;	/* did any crtc undergo a full modeset? */

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * Point of no return: after the swap, crtc->state is the new state
	 * and crtc_state from the iterator holds the old state.
	 */
	drm_atomic_helper_swap_state(dev, state);

	/* First pass: tear down every pipe that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (!needs_modeset(crtc->state))
			continue;

		any_ms = true;
		intel_pre_plane_update(intel_crtc);

		/* crtc_state here is the old state: only disable what ran. */
		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_disable_shared_dpll(intel_crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (any_ms) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
		modeset_update_crtc_power_domains(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		bool update_pipe = !modeset &&
			to_intel_crtc_state(crtc->state)->update_pipe;
		unsigned long put_domains = 0;

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Fastset pipe update: hold the power domains over the update. */
		if (update_pipe) {
			put_domains = modeset_get_crtc_power_domains(crtc);

			/* make sure intel_modeset_check_state runs */
			any_ms = true;
		}

		if (!modeset)
			intel_pre_plane_update(intel_crtc);

		drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (put_domains)
			modeset_put_power_domains(dev_priv, put_domains);

		intel_post_plane_update(intel_crtc);
	}

	/* FIXME: add subpixel order */

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);

	/* Cross-check SW state against HW after any modeset. */
	if (any_ms)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	return 0;
}
13317 :
/*
 * Force a full modeset on @crtc using its current mode, by committing an
 * atomic state with mode_changed set.  Best-effort: failures other than
 * -EDEADLK (which is retried after backoff) are silently dropped.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc (ret stays 0). */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * On success drm_atomic_commit() consumed the state, so free it only
	 * on failure — or via the "out" jump above, when no commit was made.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}
13355 :
13356 : #undef for_each_intel_crtc_masked
13357 :
/* CRTC vtable: legacy entry points are routed through the atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
13366 :
13367 0 : static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13368 : struct intel_shared_dpll *pll,
13369 : struct intel_dpll_hw_state *hw_state)
13370 : {
13371 : uint32_t val;
13372 :
13373 0 : if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
13374 0 : return false;
13375 :
13376 0 : val = I915_READ(PCH_DPLL(pll->id));
13377 0 : hw_state->dpll = val;
13378 0 : hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13379 0 : hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13380 :
13381 0 : return val & DPLL_VCO_ENABLE;
13382 0 : }
13383 :
/* Program the FP0/FP1 divisors; the DPLL itself is armed in ->enable(). */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
13390 :
/*
 * Enable a PCH DPLL.  The double write + delays follow the documented
 * hardware sequence; do not reorder.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13412 :
/* Disable a PCH DPLL after asserting no transcoder still uses it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13429 :
/* Human-readable names for the two IBX/CPT PCH DPLLs, indexed by pll id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
13434 :
13435 0 : static void ibx_pch_dpll_init(struct drm_device *dev)
13436 : {
13437 0 : struct drm_i915_private *dev_priv = dev->dev_private;
13438 : int i;
13439 :
13440 0 : dev_priv->num_shared_dpll = 2;
13441 :
13442 0 : for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13443 0 : dev_priv->shared_dplls[i].id = i;
13444 0 : dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13445 0 : dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13446 0 : dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13447 0 : dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13448 0 : dev_priv->shared_dplls[i].get_hw_state =
13449 : ibx_pch_dpll_get_hw_state;
13450 : }
13451 0 : }
13452 :
/*
 * Pick the platform-specific shared DPLL setup.  The init helpers set
 * dev_priv->num_shared_dpll themselves; platforms with neither DDI nor an
 * IBX/CPT PCH have no shared DPLLs.
 */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
13466 :
13467 : /**
13468 : * intel_prepare_plane_fb - Prepare fb for usage on plane
13469 : * @plane: drm plane to prepare for
13470 : * @fb: framebuffer to prepare for presentation
13471 : *
13472 : * Prepares a framebuffer for usage on a display plane. Generally this
13473 : * involves pinning the underlying object and updating the frontbuffer tracking
13474 : * bits. Some older platforms need special physical address handling for
13475 : * cursor planes.
13476 : *
13477 : * Returns 0 on success, negative error code on failure.
13478 : */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	int ret = 0;

	/* Plane is being disabled (no new fb): nothing to pin. */
	if (!obj)
		return 0;

	mutex_lock(&dev->struct_mutex);

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* old platforms scan the cursor out of a physical address */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
	}

	/* Move the frontbuffer-tracking bit from the old object to the new. */
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
13512 :
13513 : /**
13514 : * intel_cleanup_plane_fb - Cleans up an fb after plane use
13515 : * @plane: drm plane to clean up for
13516 : * @fb: old framebuffer that was on plane
13517 : *
13518 : * Cleans up a framebuffer that has just been removed from a plane.
13519 : */
13520 : void
13521 0 : intel_cleanup_plane_fb(struct drm_plane *plane,
13522 : const struct drm_plane_state *old_state)
13523 : {
13524 0 : struct drm_device *dev = plane->dev;
13525 0 : struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
13526 :
13527 0 : if (!obj)
13528 0 : return;
13529 :
13530 0 : if (plane->type != DRM_PLANE_TYPE_CURSOR ||
13531 0 : !INTEL_INFO(dev)->cursor_needs_physical) {
13532 0 : mutex_lock(&dev->struct_mutex);
13533 0 : intel_unpin_fb_obj(old_state->fb, old_state);
13534 0 : mutex_unlock(&dev->struct_mutex);
13535 0 : }
13536 0 : }
13537 :
13538 : int
13539 0 : skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13540 : {
13541 : int max_scale;
13542 : struct drm_device *dev;
13543 : struct drm_i915_private *dev_priv;
13544 : int crtc_clock, cdclk;
13545 :
13546 0 : if (!intel_crtc || !crtc_state)
13547 0 : return DRM_PLANE_HELPER_NO_SCALING;
13548 :
13549 0 : dev = intel_crtc->base.dev;
13550 0 : dev_priv = dev->dev_private;
13551 0 : crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13552 0 : cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13553 :
13554 0 : if (!crtc_clock || !cdclk)
13555 0 : return DRM_PLANE_HELPER_NO_SCALING;
13556 :
13557 : /*
13558 : * skl max scale is lower of:
13559 : * close to 3 but not 3, -1 is for that purpose
13560 : * or
13561 : * cdclk/crtc_clock
13562 : */
13563 0 : max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13564 :
13565 0 : return max_scale;
13566 0 : }
13567 :
13568 : static int
13569 0 : intel_check_primary_plane(struct drm_plane *plane,
13570 : struct intel_crtc_state *crtc_state,
13571 : struct intel_plane_state *state)
13572 : {
13573 0 : struct drm_crtc *crtc = state->base.crtc;
13574 0 : struct drm_framebuffer *fb = state->base.fb;
13575 : int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13576 : int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13577 : bool can_position = false;
13578 :
13579 0 : if (INTEL_INFO(plane->dev)->gen >= 9) {
13580 : /* use scaler when colorkey is not required */
13581 0 : if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13582 : min_scale = 1;
13583 0 : max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13584 0 : }
13585 : can_position = true;
13586 0 : }
13587 :
13588 0 : return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13589 0 : &state->dst, &state->clip,
13590 : min_scale, max_scale,
13591 0 : can_position, true,
13592 0 : &state->visible);
13593 : }
13594 :
13595 : static void
13596 0 : intel_commit_primary_plane(struct drm_plane *plane,
13597 : struct intel_plane_state *state)
13598 : {
13599 0 : struct drm_crtc *crtc = state->base.crtc;
13600 0 : struct drm_framebuffer *fb = state->base.fb;
13601 0 : struct drm_device *dev = plane->dev;
13602 0 : struct drm_i915_private *dev_priv = dev->dev_private;
13603 : struct intel_crtc *intel_crtc;
13604 0 : struct drm_rect *src = &state->src;
13605 :
13606 0 : crtc = crtc ? crtc : plane->crtc;
13607 0 : intel_crtc = to_intel_crtc(crtc);
13608 :
13609 0 : plane->fb = fb;
13610 0 : crtc->x = src->x1 >> 16;
13611 0 : crtc->y = src->y1 >> 16;
13612 :
13613 0 : if (!crtc->state->active)
13614 0 : return;
13615 :
13616 0 : dev_priv->display.update_primary_plane(crtc, fb,
13617 0 : state->src.x1 >> 16,
13618 0 : state->src.y1 >> 16);
13619 0 : }
13620 :
13621 : static void
13622 0 : intel_disable_primary_plane(struct drm_plane *plane,
13623 : struct drm_crtc *crtc)
13624 : {
13625 0 : struct drm_device *dev = plane->dev;
13626 0 : struct drm_i915_private *dev_priv = dev->dev_private;
13627 :
13628 0 : dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
13629 0 : }
13630 :
/*
 * Per-crtc hook run before the plane commit: program pre-update watermarks,
 * open the vblank-evasion critical section, and for fastsets update the
 * pipe config (or detach unused scalers on SKL+).
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Watermark update flagged by the check phase. */
	if (intel_crtc->atomic.update_wm_pre)
		intel_update_watermarks(crtc);

	/* Perform vblank evasion around commit operation */
	if (crtc->state->active)
		intel_pipe_update_start(intel_crtc);

	/* Full modesets handle pipe/scaler programming elsewhere. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
13655 :
13656 0 : static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13657 : struct drm_crtc_state *old_crtc_state)
13658 : {
13659 0 : struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13660 :
13661 0 : if (crtc->state->active)
13662 0 : intel_pipe_update_end(intel_crtc);
13663 0 : }
13664 :
13665 : /**
13666 : * intel_plane_destroy - destroy a plane
13667 : * @plane: plane to destroy
13668 : *
13669 : * Common destruction function for all types of planes (primary, cursor,
13670 : * sprite).
13671 : */
void intel_plane_destroy(struct drm_plane *plane)
{
	/* Unregister from DRM first, then release the containing struct. */
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
13678 :
/* Plane vtable shared by primary/cursor/sprite; legacy paths go through
 * the atomic helpers. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
13690 :
/*
 * Allocate and register the primary plane for @pipe.  Returns the plane,
 * or NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	/* SKL+ primaries can scale through the pipe scalers. */
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	primary->disable_plane = intel_disable_primary_plane;
	/* NOTE(review): plane<->pipe mapping is swapped on gen2/3 with FBC —
	 * presumably an FBC hardware constraint; confirm against specs. */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick the pixel-format table for this hardware generation. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}
13748 :
13749 0 : void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
13750 : {
13751 0 : if (!dev->mode_config.rotation_property) {
13752 : unsigned long flags = BIT(DRM_ROTATE_0) |
13753 : BIT(DRM_ROTATE_180);
13754 :
13755 0 : if (INTEL_INFO(dev)->gen >= 9)
13756 0 : flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
13757 :
13758 0 : dev->mode_config.rotation_property =
13759 0 : drm_mode_create_rotation_property(dev, flags);
13760 0 : }
13761 0 : if (dev->mode_config.rotation_property)
13762 0 : drm_object_attach_property(&plane->base.base,
13763 : dev->mode_config.rotation_property,
13764 0 : plane->base.state->rotation);
13765 0 : }
13766 :
/*
 * Validate a cursor plane update: helper-based position/clip checks plus
 * hardware constraints (supported sizes, buffer size, no tiling, CHV
 * pipe C quirk).  Returns 0 or a negative errno.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	/* Cursors never scale and may be positioned anywhere. */
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* 4 bytes per pixel, width rounded up to a power of two. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
13827 :
/* Turn the cursor off on @crtc. */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	intel_crtc_update_cursor(crtc, false);
}
13834 :
13835 : static void
13836 0 : intel_commit_cursor_plane(struct drm_plane *plane,
13837 : struct intel_plane_state *state)
13838 : {
13839 0 : struct drm_crtc *crtc = state->base.crtc;
13840 0 : struct drm_device *dev = plane->dev;
13841 : struct intel_crtc *intel_crtc;
13842 0 : struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
13843 : uint32_t addr;
13844 :
13845 0 : crtc = crtc ? crtc : plane->crtc;
13846 0 : intel_crtc = to_intel_crtc(crtc);
13847 :
13848 0 : if (!obj)
13849 0 : addr = 0;
13850 0 : else if (!INTEL_INFO(dev)->cursor_needs_physical)
13851 0 : addr = i915_gem_obj_ggtt_offset(obj);
13852 : else
13853 0 : addr = obj->phys_handle->busaddr;
13854 :
13855 0 : intel_crtc->cursor_addr = addr;
13856 :
13857 0 : if (crtc->state->active)
13858 0 : intel_crtc_update_cursor(crtc, state->visible);
13859 0 : }
13860 :
/*
 * Allocate and register the cursor plane for @pipe.  Returns the plane,
 * or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	/* Cursors never scale. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	/* gen4+ cursors support 0/180 degree rotation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* SKL+: no pipe scaler assigned to the cursor. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}
13912 :
13913 0 : static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
13914 : struct intel_crtc_state *crtc_state)
13915 : {
13916 : int i;
13917 : struct intel_scaler *intel_scaler;
13918 0 : struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
13919 :
13920 0 : for (i = 0; i < intel_crtc->num_scalers; i++) {
13921 0 : intel_scaler = &scaler_state->scalers[i];
13922 0 : intel_scaler->in_use = 0;
13923 0 : intel_scaler->mode = PS_SCALER_MODE_DYN;
13924 : }
13925 :
13926 0 : scaler_state->scaler_id = -1;
13927 0 : }
13928 :
/*
 * Allocate and register the CRTC for hardware pipe @pipe, together with
 * its primary and cursor planes and an initial atomic state.  On any
 * failure the partially constructed objects are freed and the pipe is
 * left without a CRTC (the function returns void).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	/* Link CRTC and its state both ways before anything can use them. */
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C gets a single scaler; other pipes get the full set. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Identity gamma ramp by default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 marks the cached cursor register values as "never programmed";
	 * presumably this forces the first cursor update to write the
	 * hardware — confirm against intel_crtc_update_cursor(). */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14014 :
14015 0 : enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14016 : {
14017 0 : struct drm_encoder *encoder = connector->base.encoder;
14018 0 : struct drm_device *dev = connector->base.dev;
14019 :
14020 0 : WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14021 :
14022 0 : if (!encoder || WARN_ON(!encoder->crtc))
14023 0 : return INVALID_PIPE;
14024 :
14025 0 : return to_intel_crtc(encoder->crtc)->pipe;
14026 0 : }
14027 :
14028 0 : int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14029 : struct drm_file *file)
14030 : {
14031 0 : struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14032 : struct drm_crtc *drmmode_crtc;
14033 : struct intel_crtc *crtc;
14034 :
14035 0 : drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14036 :
14037 0 : if (!drmmode_crtc) {
14038 0 : DRM_ERROR("no such CRTC id\n");
14039 0 : return -ENOENT;
14040 : }
14041 :
14042 0 : crtc = to_intel_crtc(drmmode_crtc);
14043 0 : pipe_from_crtc_id->pipe = crtc->pipe;
14044 :
14045 0 : return 0;
14046 0 : }
14047 :
14048 0 : static int intel_encoder_clones(struct intel_encoder *encoder)
14049 : {
14050 0 : struct drm_device *dev = encoder->base.dev;
14051 : struct intel_encoder *source_encoder;
14052 : int index_mask = 0;
14053 : int entry = 0;
14054 :
14055 0 : for_each_intel_encoder(dev, source_encoder) {
14056 0 : if (encoders_cloneable(encoder, source_encoder))
14057 0 : index_mask |= (1 << entry);
14058 :
14059 0 : entry++;
14060 : }
14061 :
14062 0 : return index_mask;
14063 : }
14064 :
14065 0 : static bool has_edp_a(struct drm_device *dev)
14066 : {
14067 0 : struct drm_i915_private *dev_priv = dev->dev_private;
14068 :
14069 0 : if (!IS_MOBILE(dev))
14070 0 : return false;
14071 :
14072 0 : if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14073 0 : return false;
14074 :
14075 0 : if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14076 0 : return false;
14077 :
14078 0 : return true;
14079 0 : }
14080 :
14081 0 : static bool intel_crt_present(struct drm_device *dev)
14082 : {
14083 0 : struct drm_i915_private *dev_priv = dev->dev_private;
14084 :
14085 0 : if (INTEL_INFO(dev)->gen >= 9)
14086 0 : return false;
14087 :
14088 0 : if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14089 0 : return false;
14090 :
14091 0 : if (IS_CHERRYVIEW(dev))
14092 0 : return false;
14093 :
14094 0 : if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
14095 0 : return false;
14096 :
14097 0 : return true;
14098 0 : }
14099 :
/*
 * Probe the hardware straps / detect registers and register an encoder
 * for every display output present on this platform.  The per-platform
 * branches (BXT / DDI / PCH / VLV-CHV / gen3-4 / gen2) are mutually
 * exclusive, and probe order within a branch matters: shared pins mean
 * SDVO must be tried before HDMI, and eDP before DP/HDMI on the same
 * port.  Clone masks are computed once all encoders exist.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Every encoder now exists: derive the crtc and clone masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14277 :
14278 0 : static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14279 : {
14280 0 : struct drm_device *dev = fb->dev;
14281 0 : struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14282 :
14283 0 : drm_framebuffer_cleanup(fb);
14284 0 : mutex_lock(&dev->struct_mutex);
14285 0 : WARN_ON(!intel_fb->obj->framebuffer_references--);
14286 0 : drm_gem_object_unreference(&intel_fb->obj->base);
14287 0 : mutex_unlock(&dev->struct_mutex);
14288 0 : kfree(intel_fb);
14289 0 : }
14290 :
14291 0 : static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14292 : struct drm_file *file,
14293 : unsigned int *handle)
14294 : {
14295 0 : struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14296 0 : struct drm_i915_gem_object *obj = intel_fb->obj;
14297 :
14298 0 : if (obj->userptr.mm) {
14299 : DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14300 0 : return -EINVAL;
14301 : }
14302 :
14303 0 : return drm_gem_handle_create(file, &obj->base, handle);
14304 0 : }
14305 :
14306 0 : static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14307 : struct drm_file *file,
14308 : unsigned flags, unsigned color,
14309 : struct drm_clip_rect *clips,
14310 : unsigned num_clips)
14311 : {
14312 0 : struct drm_device *dev = fb->dev;
14313 0 : struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14314 0 : struct drm_i915_gem_object *obj = intel_fb->obj;
14315 :
14316 0 : mutex_lock(&dev->struct_mutex);
14317 0 : intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14318 0 : mutex_unlock(&dev->struct_mutex);
14319 :
14320 0 : return 0;
14321 : }
14322 :
/* Framebuffer vfuncs for userspace-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14328 :
14329 : static
14330 0 : u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14331 : uint32_t pixel_format)
14332 : {
14333 0 : u32 gen = INTEL_INFO(dev)->gen;
14334 :
14335 0 : if (gen >= 9) {
14336 : /* "The stride in bytes must not exceed the of the size of 8K
14337 : * pixels and 32K bytes."
14338 : */
14339 0 : return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14340 0 : } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
14341 0 : return 32*1024;
14342 0 : } else if (gen >= 4) {
14343 0 : if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14344 0 : return 16*1024;
14345 : else
14346 0 : return 32*1024;
14347 0 : } else if (gen >= 3) {
14348 0 : if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14349 0 : return 8*1024;
14350 : else
14351 0 : return 16*1024;
14352 : } else {
14353 : /* XXX DSPC is limited to 4k tiled */
14354 0 : return 8*1024;
14355 : }
14356 0 : }
14357 :
/*
 * Validate a framebuffer request (tiling modifier vs object tiling,
 * stride alignment and pitch limit, per-generation pixel-format
 * support, offset and total size) and initialize @intel_fb around @obj.
 * Returns 0 or a negative errno; on success a framebuffer reference is
 * taken on @obj.  Caller must hold struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is accepted on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled framebuffers must use the object's fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}
14505 :
/*
 * .fb_create hook: look up the GEM object named by the first fb handle
 * and wrap it in an intel framebuffer.
 *
 * NOTE(review): the "&obj->base == NULL" test presumably relies on
 * drm_gem_object_lookup() returning NULL with 'base' at offset 0 in the
 * bo, making it equivalent to a NULL check of the lookup result —
 * confirm against struct drm_i915_gem_object's layout.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_i915_gem_object *obj;
	/* Local copy: intel_framebuffer_init() may rewrite modifier[0]. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
14521 :
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No fbdev emulation built in: output-poll-changed needs no action. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
14527 :
/* Device-level mode-config entry points (atomic modeset + fb creation). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
14536 :
14537 : /* Set up chip specific display functions */
14538 0 : static void intel_init_display(struct drm_device *dev)
14539 : {
14540 0 : struct drm_i915_private *dev_priv = dev->dev_private;
14541 :
14542 0 : if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14543 0 : dev_priv->display.find_dpll = g4x_find_best_dpll;
14544 0 : else if (IS_CHERRYVIEW(dev))
14545 0 : dev_priv->display.find_dpll = chv_find_best_dpll;
14546 0 : else if (IS_VALLEYVIEW(dev))
14547 0 : dev_priv->display.find_dpll = vlv_find_best_dpll;
14548 0 : else if (IS_PINEVIEW(dev))
14549 0 : dev_priv->display.find_dpll = pnv_find_best_dpll;
14550 : else
14551 0 : dev_priv->display.find_dpll = i9xx_find_best_dpll;
14552 :
14553 0 : if (INTEL_INFO(dev)->gen >= 9) {
14554 0 : dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14555 0 : dev_priv->display.get_initial_plane_config =
14556 : skylake_get_initial_plane_config;
14557 0 : dev_priv->display.crtc_compute_clock =
14558 : haswell_crtc_compute_clock;
14559 0 : dev_priv->display.crtc_enable = haswell_crtc_enable;
14560 0 : dev_priv->display.crtc_disable = haswell_crtc_disable;
14561 0 : dev_priv->display.update_primary_plane =
14562 : skylake_update_primary_plane;
14563 0 : } else if (HAS_DDI(dev)) {
14564 0 : dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14565 0 : dev_priv->display.get_initial_plane_config =
14566 : ironlake_get_initial_plane_config;
14567 0 : dev_priv->display.crtc_compute_clock =
14568 : haswell_crtc_compute_clock;
14569 0 : dev_priv->display.crtc_enable = haswell_crtc_enable;
14570 0 : dev_priv->display.crtc_disable = haswell_crtc_disable;
14571 0 : dev_priv->display.update_primary_plane =
14572 : ironlake_update_primary_plane;
14573 0 : } else if (HAS_PCH_SPLIT(dev)) {
14574 0 : dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14575 0 : dev_priv->display.get_initial_plane_config =
14576 : ironlake_get_initial_plane_config;
14577 0 : dev_priv->display.crtc_compute_clock =
14578 : ironlake_crtc_compute_clock;
14579 0 : dev_priv->display.crtc_enable = ironlake_crtc_enable;
14580 0 : dev_priv->display.crtc_disable = ironlake_crtc_disable;
14581 0 : dev_priv->display.update_primary_plane =
14582 : ironlake_update_primary_plane;
14583 0 : } else if (IS_VALLEYVIEW(dev)) {
14584 : dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14585 : dev_priv->display.get_initial_plane_config =
14586 : i9xx_get_initial_plane_config;
14587 : dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14588 0 : dev_priv->display.crtc_enable = valleyview_crtc_enable;
14589 0 : dev_priv->display.crtc_disable = i9xx_crtc_disable;
14590 0 : dev_priv->display.update_primary_plane =
14591 : i9xx_update_primary_plane;
14592 0 : } else {
14593 : dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14594 : dev_priv->display.get_initial_plane_config =
14595 : i9xx_get_initial_plane_config;
14596 : dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14597 0 : dev_priv->display.crtc_enable = i9xx_crtc_enable;
14598 0 : dev_priv->display.crtc_disable = i9xx_crtc_disable;
14599 0 : dev_priv->display.update_primary_plane =
14600 : i9xx_update_primary_plane;
14601 : }
14602 :
14603 : /* Returns the core display clock speed */
14604 0 : if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14605 0 : dev_priv->display.get_display_clock_speed =
14606 : skylake_get_display_clock_speed;
14607 0 : else if (IS_BROXTON(dev))
14608 0 : dev_priv->display.get_display_clock_speed =
14609 : broxton_get_display_clock_speed;
14610 0 : else if (IS_BROADWELL(dev))
14611 0 : dev_priv->display.get_display_clock_speed =
14612 : broadwell_get_display_clock_speed;
14613 0 : else if (IS_HASWELL(dev))
14614 0 : dev_priv->display.get_display_clock_speed =
14615 : haswell_get_display_clock_speed;
14616 0 : else if (IS_VALLEYVIEW(dev))
14617 0 : dev_priv->display.get_display_clock_speed =
14618 : valleyview_get_display_clock_speed;
14619 0 : else if (IS_GEN5(dev))
14620 0 : dev_priv->display.get_display_clock_speed =
14621 : ilk_get_display_clock_speed;
14622 0 : else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14623 0 : IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14624 0 : dev_priv->display.get_display_clock_speed =
14625 : i945_get_display_clock_speed;
14626 0 : else if (IS_GM45(dev))
14627 0 : dev_priv->display.get_display_clock_speed =
14628 : gm45_get_display_clock_speed;
14629 0 : else if (IS_CRESTLINE(dev))
14630 0 : dev_priv->display.get_display_clock_speed =
14631 : i965gm_get_display_clock_speed;
14632 0 : else if (IS_PINEVIEW(dev))
14633 0 : dev_priv->display.get_display_clock_speed =
14634 : pnv_get_display_clock_speed;
14635 0 : else if (IS_G33(dev) || IS_G4X(dev))
14636 0 : dev_priv->display.get_display_clock_speed =
14637 : g33_get_display_clock_speed;
14638 0 : else if (IS_I915G(dev))
14639 0 : dev_priv->display.get_display_clock_speed =
14640 : i915_get_display_clock_speed;
14641 0 : else if (IS_I945GM(dev) || IS_845G(dev))
14642 0 : dev_priv->display.get_display_clock_speed =
14643 : i9xx_misc_get_display_clock_speed;
14644 0 : else if (IS_PINEVIEW(dev))
14645 0 : dev_priv->display.get_display_clock_speed =
14646 : pnv_get_display_clock_speed;
14647 0 : else if (IS_I915GM(dev))
14648 0 : dev_priv->display.get_display_clock_speed =
14649 : i915gm_get_display_clock_speed;
14650 0 : else if (IS_I865G(dev))
14651 0 : dev_priv->display.get_display_clock_speed =
14652 : i865_get_display_clock_speed;
14653 0 : else if (IS_I85X(dev))
14654 0 : dev_priv->display.get_display_clock_speed =
14655 : i85x_get_display_clock_speed;
14656 : else { /* 830 */
14657 0 : WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
14658 0 : dev_priv->display.get_display_clock_speed =
14659 : i830_get_display_clock_speed;
14660 : }
14661 :
14662 0 : if (IS_GEN5(dev)) {
14663 0 : dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14664 0 : } else if (IS_GEN6(dev)) {
14665 0 : dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14666 0 : } else if (IS_IVYBRIDGE(dev)) {
14667 : /* FIXME: detect B0+ stepping and use auto training */
14668 0 : dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14669 0 : } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
14670 0 : dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14671 0 : if (IS_BROADWELL(dev)) {
14672 0 : dev_priv->display.modeset_commit_cdclk =
14673 : broadwell_modeset_commit_cdclk;
14674 0 : dev_priv->display.modeset_calc_cdclk =
14675 : broadwell_modeset_calc_cdclk;
14676 0 : }
14677 0 : } else if (IS_VALLEYVIEW(dev)) {
14678 0 : dev_priv->display.modeset_commit_cdclk =
14679 : valleyview_modeset_commit_cdclk;
14680 0 : dev_priv->display.modeset_calc_cdclk =
14681 : valleyview_modeset_calc_cdclk;
14682 0 : } else if (IS_BROXTON(dev)) {
14683 0 : dev_priv->display.modeset_commit_cdclk =
14684 : broxton_modeset_commit_cdclk;
14685 0 : dev_priv->display.modeset_calc_cdclk =
14686 : broxton_modeset_calc_cdclk;
14687 0 : }
14688 :
14689 0 : switch (INTEL_INFO(dev)->gen) {
14690 : case 2:
14691 0 : dev_priv->display.queue_flip = intel_gen2_queue_flip;
14692 0 : break;
14693 :
14694 : case 3:
14695 0 : dev_priv->display.queue_flip = intel_gen3_queue_flip;
14696 0 : break;
14697 :
14698 : case 4:
14699 : case 5:
14700 0 : dev_priv->display.queue_flip = intel_gen4_queue_flip;
14701 0 : break;
14702 :
14703 : case 6:
14704 0 : dev_priv->display.queue_flip = intel_gen6_queue_flip;
14705 0 : break;
14706 : case 7:
14707 : case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
14708 0 : dev_priv->display.queue_flip = intel_gen7_queue_flip;
14709 0 : break;
14710 : case 9:
14711 : /* Drop through - unsupported since execlist only. */
14712 : default:
14713 : /* Default just returns -ENODEV to indicate unsupported */
14714 0 : dev_priv->display.queue_flip = intel_default_queue_flip;
14715 0 : }
14716 :
14717 0 : rw_init(&dev_priv->pps_mutex, "pps");
14718 0 : }
14719 :
14720 : /*
14721 : * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
14722 : * resume, or other times. This quirk makes sure that's the case for
14723 : * affected systems.
14724 : */
/* Quirk hook: keep pipe A permanently enabled on affected machines. */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
14732 :
/* Quirk hook: keep pipe B permanently enabled on affected machines. */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}
14740 :
14741 : /*
14742 : * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14743 : */
/* Quirk hook: disable LVDS spread-spectrum clocking. */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
14750 :
14751 : /*
14752 : * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14753 : * brightness value
14754 : */
/* Quirk hook: treat the panel backlight brightness value as inverted. */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
14761 :
14762 : /* Some VBT's incorrectly indicate no backlight is present */
14763 0 : static void quirk_backlight_present(struct drm_device *dev)
14764 : {
14765 0 : struct drm_i915_private *dev_priv = dev->dev_private;
14766 0 : dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14767 : DRM_INFO("applying backlight present quirk\n");
14768 0 : }
14769 :
/*
 * One PCI-id keyed quirk table entry; hook() is run by
 * intel_init_quirks() when the device and subsystem ids match.
 * PCI_ANY_ID acts as a wildcard for the subsystem fields.
 */
struct intel_quirk {
	int device;		/* PCI device id */
	int subsystem_vendor;	/* PCI subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};
14776 :
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* quirk to apply on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NUL-entry-terminated match list */
};
14782 :
#ifdef notyet
/* DMI match callback: log which system had its backlight polarity quirked. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

/* DMI-keyed quirks for machines without usable PCI subsystem ids. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
#endif
14806 :
/*
 * PCI-id keyed quirk table, matched against the device by
 * intel_init_quirks().  PCI_ANY_ID wildcards a subsystem field.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
14868 :
14869 0 : static void intel_init_quirks(struct drm_device *dev)
14870 : {
14871 0 : struct pci_dev *d = dev->pdev;
14872 : int i;
14873 :
14874 0 : for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14875 0 : struct intel_quirk *q = &intel_quirks[i];
14876 :
14877 0 : if (d->device == q->device &&
14878 0 : (d->subsystem_vendor == q->subsystem_vendor ||
14879 0 : q->subsystem_vendor == PCI_ANY_ID) &&
14880 0 : (d->subsystem_device == q->subsystem_device ||
14881 0 : q->subsystem_device == PCI_ANY_ID))
14882 0 : q->hook(dev);
14883 : }
14884 : #ifdef notyet
14885 : for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14886 : if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14887 : intel_dmi_quirks[i].hook(dev);
14888 : }
14889 : #endif
14890 0 : }
14891 :
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#ifdef __linux__
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
#else
	/* OpenBSD's outb() takes (port, value) -- argument order is the
	 * reverse of Linux's outb(value, port), hence the two branches. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	/* Set bit 5 of sequencer register SR01 (VGA screen-off). */
	outb(VGA_SR_DATA, sr1 | 1<<5);
#endif
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	/* Then disable the display plane itself via the vgacntrl register. */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
14916 :
/*
 * (Re)initialize display hardware state that does not depend on the
 * current mode configuration: cdclk bookkeeping, DDI preparation,
 * clock gating setup and GT power saving.  Called at init and resume.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
14924 :
/*
 * One-time modeset initialization: set up the drm mode config, apply
 * quirks, initialize PM and display vtables, create crtcs/planes,
 * probe outputs, read back and sanitize the BIOS-programmed hardware
 * state, and take over any BIOS framebuffer.  Bails out early on
 * display-less (num_pipes == 0) hardware.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do for display-less hardware. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);
	intel_init_audio(dev);

	/* Maximum framebuffer dimensions scale with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits are likewise platform dependent. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a crtc per pipe and the sprite planes attached to each. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_fbc_disable(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	/* Take over the framebuffer the BIOS left on each active crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}
}
15049 :
15050 0 : static void intel_enable_pipe_a(struct drm_device *dev)
15051 : {
15052 : struct intel_connector *connector;
15053 : struct drm_connector *crt = NULL;
15054 0 : struct intel_load_detect_pipe load_detect_temp;
15055 0 : struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15056 :
15057 : /* We can't just switch on the pipe A, we need to set things up with a
15058 : * proper mode and output configuration. As a gross hack, enable pipe A
15059 : * by enabling the load detect pipe once. */
15060 0 : for_each_intel_connector(dev, connector) {
15061 0 : if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15062 : crt = &connector->base;
15063 0 : break;
15064 : }
15065 : }
15066 :
15067 0 : if (!crt)
15068 0 : return;
15069 :
15070 0 : if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15071 0 : intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15072 0 : }
15073 :
15074 : static bool
15075 0 : intel_check_plane_mapping(struct intel_crtc *crtc)
15076 : {
15077 0 : struct drm_device *dev = crtc->base.dev;
15078 0 : struct drm_i915_private *dev_priv = dev->dev_private;
15079 : u32 val;
15080 :
15081 0 : if (INTEL_INFO(dev)->num_pipes == 1)
15082 0 : return true;
15083 :
15084 0 : val = I915_READ(DSPCNTR(!crtc->plane));
15085 :
15086 0 : if ((val & DISPLAY_PLANE_ENABLE) &&
15087 0 : (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15088 0 : return false;
15089 :
15090 0 : return true;
15091 0 : }
15092 :
15093 0 : static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15094 : {
15095 0 : struct drm_device *dev = crtc->base.dev;
15096 : struct intel_encoder *encoder;
15097 :
15098 0 : for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15099 0 : return true;
15100 :
15101 0 : return false;
15102 0 : }
15103 :
/*
 * Reconcile one crtc's software state with whatever the BIOS left in
 * the hardware: clear debug frame-start delays, reset vblank
 * bookkeeping, shut off stray non-primary planes, fix up gen2/3
 * plane->pipe cross-mapping, honour the pipe A force quirk, disable
 * the pipe when no encoder drives it, and start out with fifo
 * underrun reporting disabled where needed.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config->cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15208 :
/*
 * Fix up an encoder whose state disagrees with its connectors: when a
 * connector claims the encoder but the encoder has no active pipe
 * behind it, force the encoder off and clamp the stale connector
 * links to DPMS off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;
	bool active = false;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	/* Is any connector currently routed through this encoder? */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15261 :
15262 0 : void i915_redisable_vga_power_on(struct drm_device *dev)
15263 : {
15264 0 : struct drm_i915_private *dev_priv = dev->dev_private;
15265 0 : u32 vga_reg = i915_vgacntrl_reg(dev);
15266 :
15267 0 : if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15268 : DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15269 0 : i915_disable_vga(dev);
15270 0 : }
15271 0 : }
15272 :
/*
 * Re-disable the VGA plane if necessary, first checking that the VGA
 * power domain is up so the register poke is safe.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}
15289 :
15290 0 : static bool primary_get_hw_state(struct intel_plane *plane)
15291 : {
15292 0 : struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15293 :
15294 0 : return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15295 : }
15296 :
15297 : /* FIXME read out full plane state for all planes */
15298 0 : static void readout_plane_state(struct intel_crtc *crtc)
15299 : {
15300 0 : struct drm_plane *primary = crtc->base.primary;
15301 : struct intel_plane_state *plane_state =
15302 0 : to_intel_plane_state(primary->state);
15303 :
15304 0 : plane_state->visible =
15305 0 : primary_get_hw_state(to_intel_plane(primary));
15306 :
15307 0 : if (plane_state->visible)
15308 0 : crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15309 0 : }
15310 :
/*
 * Read the current hardware state back into the driver's software
 * structures: pipe configs per crtc, shared DPLL usage, encoder and
 * connector routing, and finally the crtc modes needed to keep the
 * atomic core consistent.  No sanitizing happens here -- that is done
 * afterwards by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* Per-crtc: rebuild crtc->config from the pipe registers. */
	for_each_intel_crtc(dev, crtc) {
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Shared DPLLs: recompute on/off state and which crtcs use each. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	/* Encoders: link each active encoder to the crtc it drives. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: set dpms and encoder links from their hw state. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Finally reconstruct the crtc modes for active pipes. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}
15424 :
/*
 * Scan out the current hw modeset state and sanitize the software
 * state to match it: read back everything, fix up inconsistent
 * encoders/crtcs, shut down unused DPLLs, read watermark state, and
 * drop the init power reference.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off shared DPLLs left on by the BIOS but not used by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back watermark state via the platform-appropriate path. */
	if (IS_VALLEYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}
15481 :
/*
 * Restore the display on resume: capture the complete current atomic
 * state (crtcs, planes, connectors, shared DPLLs), force a modeset on
 * every crtc, re-read and sanitize the hardware state, then commit the
 * preserved state back.  On any failure the error is logged and the
 * partially-built state is freed.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	if (!ret)
		return;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}
15533 :
/*
 * Late (post-GEM) modeset init: bring up GT powersave and the display
 * hw, set up the overlay, pin & fence any boot framebuffers that were
 * allocated earlier, and register the backlight.  A framebuffer that
 * fails to pin is dropped from its crtc rather than treated as fatal.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state,
						 NULL, NULL);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: detach the fb from this crtc. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
15577 :
15578 0 : void intel_connector_unregister(struct intel_connector *intel_connector)
15579 : {
15580 0 : struct drm_connector *connector = &intel_connector->base;
15581 :
15582 0 : intel_panel_destroy_backlight(connector);
15583 0 : drm_connector_unregister(connector);
15584 0 : }
15585 :
/*
 * Tear down everything intel_modeset_init() set up.  Ordering is
 * important here: interrupts and polling go first to stop new work,
 * then connectors/backlights, then the mode config, and GT powersave
 * and gmbus last.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	#ifdef notyet
	intel_unregister_dsm_handler();
	#endif

	intel_fbc_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_teardown_gmbus(dev);
}
15635 :
15636 : /*
15637 : * Return which encoder is currently attached for connector.
15638 : */
15639 0 : struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
15640 : {
15641 0 : return &intel_attached_encoder(connector)->base;
15642 : }
15643 :
15644 0 : void intel_connector_attach_encoder(struct intel_connector *connector,
15645 : struct intel_encoder *encoder)
15646 : {
15647 0 : connector->encoder = encoder;
15648 0 : drm_mode_connector_attach_encoder(&connector->base,
15649 0 : &encoder->base);
15650 0 : }
15651 :
15652 : /*
15653 : * set vga decode state - true == enable VGA decode
15654 : */
15655 0 : int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15656 : {
15657 0 : struct drm_i915_private *dev_priv = dev->dev_private;
15658 0 : unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15659 0 : u16 gmch_ctrl;
15660 :
15661 0 : if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15662 0 : DRM_ERROR("failed to read control word\n");
15663 0 : return -EIO;
15664 : }
15665 :
15666 0 : if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15667 0 : return 0;
15668 :
15669 0 : if (state)
15670 0 : gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15671 : else
15672 0 : gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
15673 :
15674 0 : if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15675 0 : DRM_ERROR("failed to write control word\n");
15676 0 : return -EIO;
15677 : }
15678 :
15679 0 : return 0;
15680 0 : }
15681 :
/*
 * Snapshot of display-related registers taken at GPU error capture time.
 * Filled by intel_display_capture_error_state() and dumped in readable
 * form by intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_DRIVER contents; captured on HSW/BDW only. */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	/* Per-pipe hardware cursor registers. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False if the pipe's power domain was off at capture
		 * time; the register fields are then left zeroed. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers; which fields are captured
	 * depends on hardware generation. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* False if the transcoder's power domain was off. */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* Display timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* transcoders A, B, C plus eDP */
};
15725 :
15726 : struct intel_display_error_state *
15727 0 : intel_display_capture_error_state(struct drm_device *dev)
15728 : {
15729 0 : struct drm_i915_private *dev_priv = dev->dev_private;
15730 : struct intel_display_error_state *error;
15731 0 : int transcoders[] = {
15732 : TRANSCODER_A,
15733 : TRANSCODER_B,
15734 : TRANSCODER_C,
15735 : TRANSCODER_EDP,
15736 : };
15737 : int i;
15738 :
15739 0 : if (INTEL_INFO(dev)->num_pipes == 0)
15740 0 : return NULL;
15741 :
15742 0 : error = kzalloc(sizeof(*error), GFP_ATOMIC);
15743 0 : if (error == NULL)
15744 0 : return NULL;
15745 :
15746 0 : if (IS_HASWELL(dev) || IS_BROADWELL(dev))
15747 0 : error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
15748 :
15749 0 : for_each_pipe(dev_priv, i) {
15750 0 : error->pipe[i].power_domain_on =
15751 0 : __intel_display_power_is_enabled(dev_priv,
15752 : POWER_DOMAIN_PIPE(i));
15753 0 : if (!error->pipe[i].power_domain_on)
15754 : continue;
15755 :
15756 0 : error->cursor[i].control = I915_READ(CURCNTR(i));
15757 0 : error->cursor[i].position = I915_READ(CURPOS(i));
15758 0 : error->cursor[i].base = I915_READ(CURBASE(i));
15759 :
15760 0 : error->plane[i].control = I915_READ(DSPCNTR(i));
15761 0 : error->plane[i].stride = I915_READ(DSPSTRIDE(i));
15762 0 : if (INTEL_INFO(dev)->gen <= 3) {
15763 0 : error->plane[i].size = I915_READ(DSPSIZE(i));
15764 0 : error->plane[i].pos = I915_READ(DSPPOS(i));
15765 0 : }
15766 0 : if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15767 0 : error->plane[i].addr = I915_READ(DSPADDR(i));
15768 0 : if (INTEL_INFO(dev)->gen >= 4) {
15769 0 : error->plane[i].surface = I915_READ(DSPSURF(i));
15770 0 : error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15771 0 : }
15772 :
15773 0 : error->pipe[i].source = I915_READ(PIPESRC(i));
15774 :
15775 0 : if (HAS_GMCH_DISPLAY(dev))
15776 0 : error->pipe[i].stat = I915_READ(PIPESTAT(i));
15777 : }
15778 :
15779 0 : error->num_transcoders = INTEL_INFO(dev)->num_pipes;
15780 0 : if (HAS_DDI(dev_priv->dev))
15781 0 : error->num_transcoders++; /* Account for eDP. */
15782 :
15783 0 : for (i = 0; i < error->num_transcoders; i++) {
15784 0 : enum transcoder cpu_transcoder = transcoders[i];
15785 :
15786 0 : error->transcoder[i].power_domain_on =
15787 0 : __intel_display_power_is_enabled(dev_priv,
15788 0 : POWER_DOMAIN_TRANSCODER(cpu_transcoder));
15789 0 : if (!error->transcoder[i].power_domain_on)
15790 0 : continue;
15791 :
15792 0 : error->transcoder[i].cpu_transcoder = cpu_transcoder;
15793 :
15794 0 : error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15795 0 : error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15796 0 : error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15797 0 : error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15798 0 : error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15799 0 : error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15800 0 : error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
15801 0 : }
15802 :
15803 0 : return error;
15804 0 : }
15805 :
15806 : #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15807 :
15808 : void
15809 0 : intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15810 : struct drm_device *dev,
15811 : struct intel_display_error_state *error)
15812 : {
15813 0 : struct drm_i915_private *dev_priv = dev->dev_private;
15814 : int i;
15815 :
15816 0 : if (!error)
15817 0 : return;
15818 :
15819 0 : err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
15820 0 : if (IS_HASWELL(dev) || IS_BROADWELL(dev))
15821 0 : err_printf(m, "PWR_WELL_CTL2: %08x\n",
15822 : error->power_well_driver);
15823 0 : for_each_pipe(dev_priv, i) {
15824 0 : err_printf(m, "Pipe [%d]:\n", i);
15825 0 : err_printf(m, " Power: %s\n",
15826 : error->pipe[i].power_domain_on ? "on" : "off");
15827 0 : err_printf(m, " SRC: %08x\n", error->pipe[i].source);
15828 0 : err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
15829 :
15830 0 : err_printf(m, "Plane [%d]:\n", i);
15831 0 : err_printf(m, " CNTR: %08x\n", error->plane[i].control);
15832 0 : err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
15833 0 : if (INTEL_INFO(dev)->gen <= 3) {
15834 0 : err_printf(m, " SIZE: %08x\n", error->plane[i].size);
15835 0 : err_printf(m, " POS: %08x\n", error->plane[i].pos);
15836 0 : }
15837 0 : if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15838 0 : err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
15839 0 : if (INTEL_INFO(dev)->gen >= 4) {
15840 0 : err_printf(m, " SURF: %08x\n", error->plane[i].surface);
15841 0 : err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
15842 0 : }
15843 :
15844 0 : err_printf(m, "Cursor [%d]:\n", i);
15845 0 : err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
15846 0 : err_printf(m, " POS: %08x\n", error->cursor[i].position);
15847 0 : err_printf(m, " BASE: %08x\n", error->cursor[i].base);
15848 : }
15849 :
15850 0 : for (i = 0; i < error->num_transcoders; i++) {
15851 0 : err_printf(m, "CPU transcoder: %c\n",
15852 : transcoder_name(error->transcoder[i].cpu_transcoder));
15853 0 : err_printf(m, " Power: %s\n",
15854 : error->transcoder[i].power_domain_on ? "on" : "off");
15855 0 : err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
15856 0 : err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
15857 0 : err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
15858 0 : err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
15859 0 : err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
15860 0 : err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
15861 0 : err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
15862 : }
15863 0 : }
15864 :
15865 0 : void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
15866 : {
15867 : struct intel_crtc *crtc;
15868 :
15869 0 : for_each_intel_crtc(dev, crtc) {
15870 : struct intel_unpin_work *work;
15871 :
15872 0 : spin_lock_irq(&dev->event_lock);
15873 :
15874 0 : work = crtc->unpin_work;
15875 :
15876 0 : if (work && work->event &&
15877 0 : work->event->base.file_priv == file) {
15878 0 : kfree(work->event);
15879 0 : work->event = NULL;
15880 0 : }
15881 :
15882 0 : spin_unlock_irq(&dev->event_lock);
15883 : }
15884 0 : }
|