Line data Source code
1 : /*
2 : * Copyright © 2008 Intel Corporation
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice (including the next
12 : * paragraph) shall be included in all copies or substantial portions of the
13 : * Software.
14 : *
15 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 : * IN THE SOFTWARE.
22 : *
23 : * Authors:
24 : * Keith Packard <keithp@keithp.com>
25 : *
26 : */
27 :
28 : #ifdef __linux__
29 : #include <linux/i2c.h>
30 : #include <linux/slab.h>
31 : #include <linux/export.h>
32 : #include <linux/notifier.h>
33 : #include <linux/reboot.h>
34 : #endif
35 : #include <dev/pci/drm/drmP.h>
36 : #include <dev/pci/drm/drm_atomic_helper.h>
37 : #include <dev/pci/drm/drm_crtc.h>
38 : #include <dev/pci/drm/drm_crtc_helper.h>
39 : #include <dev/pci/drm/drm_edid.h>
40 : #include "intel_drv.h"
41 : #include <dev/pci/drm/i915_drm.h>
42 : #include "i915_drv.h"
43 :
44 : #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
45 :
46 : /* Compliance test status bits */
47 : #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
48 : #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 : #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 : #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 :
/*
 * Pairs a DP link rate (in kHz) with the DPLL divider settings that
 * produce it on a given platform.
 */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

/* DPLL settings for gen4 DP at the 1.62/2.7 GHz link rates */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-attached DP ports */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for Valleyview */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates (kHz), per platform family */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101 :
102 : /**
103 : * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104 : * @intel_dp: DP struct
105 : *
106 : * If a CPU or PCH DP output is attached to an eDP panel, this function
107 : * will return true, and false otherwise.
108 : */
109 0 : static bool is_edp(struct intel_dp *intel_dp)
110 : {
111 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 :
113 0 : return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 : }
115 :
116 0 : static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 : {
118 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 :
120 0 : return intel_dig_port->base.base.dev;
121 : }
122 :
123 0 : static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 : {
125 0 : return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 : }
127 :
128 : static void intel_dp_link_down(struct intel_dp *intel_dp);
129 : static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 : static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 : static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 : static void vlv_steal_power_sequencer(struct drm_device *dev,
133 : enum pipe pipe);
134 :
/*
 * Build a mask of the lanes (out of the maximum of 4) that are NOT in
 * use when the link is driven with @lane_count lanes.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1 << lane_count) - 1;

	return ~used & 0xf;
}
139 :
/*
 * Read the sink's advertised maximum link rate from the DPCD and clamp
 * it to a value this driver understands (1.62, 2.7 or 5.4 GHz).  An
 * unrecognized value triggers a warning and falls back to the lowest
 * rate so link training can still proceed.
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
158 :
/*
 * Maximum usable lane count: the minimum of what the source port can
 * drive and what the sink reports in its DPCD.  On DDI platforms,
 * port A without the DDI_A_4_LANES strap only has two lanes wired up.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
174 :
175 : /*
176 : * The units on the numbers in the next two are... bizarre. Examples will
177 : * make it clearer; this one parallels an example in the eDP spec.
178 : *
179 : * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
180 : *
181 : * 270000 * 1 * 8 / 10 == 216000
182 : *
183 : * The actual data capacity of that configuration is 2.16Gbit/s, so the
184 : * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
185 : * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186 : * 119000. At 18bpp that's 2142000 kilobits per second.
187 : *
188 : * Thus the strange-looking division by 10 in intel_dp_link_required, to
189 : * get the result in decakilobits instead of kilobits.
190 : */
191 :
/*
 * Link bandwidth required for @pixel_clock (kHz) at @bpp bits per
 * pixel, expressed in decakilobits/s (see the units comment above).
 * Rounds up so we never under-provision the link.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	const int total_kbps = pixel_clock * bpp;

	/* round up to whole decakilobits */
	return (total_kbps + 9) / 10;
}
197 :
/*
 * Usable data rate for @max_lanes lanes at @max_link_clock (kHz symbol
 * rate): 8b/10b channel coding leaves 8/10 of the raw bandwidth for
 * payload.  Result is in decakilobits/s, matching
 * intel_dp_link_required().
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
203 :
/*
 * drm mode_valid hook for DP connectors.  For eDP with a fixed panel
 * mode, modes larger than the panel are rejected and the panel's own
 * pixel clock is used for the bandwidth check.  The link bandwidth is
 * checked at 18bpp; modes under 10 MHz or needing double-clocking are
 * rejected.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		/* the panel can't display anything bigger than itself */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
241 :
/*
 * Pack up to four message bytes into one big-endian 32-bit word, the
 * layout the AUX channel data registers expect.  Extra source bytes
 * beyond four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);
	return v;
}
253 :
/*
 * Unpack a big-endian 32-bit AUX data word into up to four bytes;
 * requests beyond four bytes are clamped.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
262 :
263 : static void
264 : intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265 : struct intel_dp *intel_dp);
266 : static void
267 : intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268 : struct intel_dp *intel_dp);
269 :
/*
 * Acquire the panel power sequencer lock.  A reference on the port's
 * AUX power domain is taken *before* grabbing pps_mutex — see the
 * comment in vlv_power_sequencer_reset() for why the ordering matters.
 * Always pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
287 :
/*
 * Release the panel power sequencer lock and drop the AUX power domain
 * reference taken by pps_lock(), in the reverse order it acquired them.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
301 :
302 : static void
303 0 : vlv_power_sequencer_kick(struct intel_dp *intel_dp)
304 : {
305 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 0 : struct drm_device *dev = intel_dig_port->base.base.dev;
307 0 : struct drm_i915_private *dev_priv = dev->dev_private;
308 0 : enum pipe pipe = intel_dp->pps_pipe;
309 : bool pll_enabled, release_cl_override = false;
310 0 : enum dpio_phy phy = DPIO_PHY(pipe);
311 0 : enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312 : uint32_t DP;
313 :
314 0 : if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315 : "skipping pipe %c power seqeuncer kick due to port %c being active\n",
316 : pipe_name(pipe), port_name(intel_dig_port->port)))
317 0 : return;
318 :
319 : DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320 : pipe_name(pipe), port_name(intel_dig_port->port));
321 :
322 : /* Preserve the BIOS-computed detected bit. This is
323 : * supposed to be read-only.
324 : */
325 0 : DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326 : DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327 : DP |= DP_PORT_WIDTH(1);
328 : DP |= DP_LINK_TRAIN_PAT_1;
329 :
330 0 : if (IS_CHERRYVIEW(dev))
331 0 : DP |= DP_PIPE_SELECT_CHV(pipe);
332 0 : else if (pipe == PIPE_B)
333 0 : DP |= DP_PIPEB_SELECT;
334 :
335 0 : pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
336 :
337 : /*
338 : * The DPLL for the pipe must be enabled for this to work.
339 : * So enable temporarily it if it's not already enabled.
340 : */
341 0 : if (!pll_enabled) {
342 0 : release_cl_override = IS_CHERRYVIEW(dev) &&
343 0 : !chv_phy_powergate_ch(dev_priv, phy, ch, true);
344 :
345 0 : vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346 : &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
347 0 : }
348 :
349 : /*
350 : * Similar magic as in intel_dp_enable_port().
351 : * We _must_ do this port enable + disable trick
352 : * to make this power seqeuencer lock onto the port.
353 : * Otherwise even VDD force bit won't work.
354 : */
355 0 : I915_WRITE(intel_dp->output_reg, DP);
356 0 : POSTING_READ(intel_dp->output_reg);
357 :
358 0 : I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 0 : POSTING_READ(intel_dp->output_reg);
360 :
361 0 : I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 0 : POSTING_READ(intel_dp->output_reg);
363 :
364 0 : if (!pll_enabled) {
365 0 : vlv_force_pll_off(dev, pipe);
366 :
367 0 : if (release_cl_override)
368 0 : chv_phy_powergate_ch(dev_priv, phy, ch, false);
369 : }
370 0 : }
371 :
/*
 * Return the pipe whose power sequencer drives this eDP port,
 * assigning a currently unused one on first use (stealing it from any
 * other user, initializing it for this port, and kicking it so it
 * locks onto the port).  Must be called with pps_mutex held; eDP
 * ports only.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
435 :
436 : typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 : enum pipe pipe);
438 :
/* vlv_pipe_check: is panel power currently on for @pipe's sequencer? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
444 :
/* vlv_pipe_check: is VDD force currently enabled on @pipe's sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
450 :
/* vlv_pipe_check: matches any pipe unconditionally. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
456 :
/*
 * Scan pipes A and B for a power sequencer whose PANEL_PORT_SELECT
 * field points at @port and which additionally satisfies @pipe_check.
 * Returns the first match or INVALID_PIPE.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
479 :
/*
 * At init time, adopt whichever power sequencer the BIOS (or a
 * previous driver instance) left associated with this port, preferring
 * one that currently has the panel powered, then one with VDD forced
 * on, then any with the right port select.  If none matches,
 * pps_pipe stays INVALID_PIPE and vlv_power_sequencer_pipe() will pick
 * one lazily.  Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
516 :
/*
 * Invalidate the cached power sequencer assignment (pps_pipe) on every
 * eDP encoder, forcing a fresh pick on next use.  VLV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
545 :
/*
 * Panel power control register for this port's sequencer; the register
 * location differs per platform (BXT, PCH-split, VLV/CHV per-pipe).
 */
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
557 :
/*
 * Panel power status register for this port's sequencer; platform
 * variants mirror those of _pp_ctrl_reg().
 */
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
569 :
/*
 * Reboot notifier handler: shut panel power down on SYS_RESTART so the
 * eDP T12 (power-cycle) timing is guaranteed across the reboot.  Only
 * applicable when panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		drm_msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
605 :
/*
 * Is panel power currently on?  Requires pps_mutex.  On VLV a port
 * with no power sequencer assigned yet reports "off" rather than
 * touching an unowned sequencer.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
619 :
/*
 * Is VDD currently forced on?  Requires pps_mutex.  As with
 * edp_have_panel_power(), an unassigned VLV sequencer reports "off".
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
633 :
/*
 * Sanity check before AUX traffic on an eDP panel: warn when neither
 * panel power nor forced VDD is up, since the transaction is then
 * bound to fail.  No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
#ifdef DRMDEBUG
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
652 :
/*
 * Wait for the AUX channel to clear its SEND_BUSY bit, returning the
 * final channel status register value.  Sleeps on the gmbus wait queue
 * when the AUX interrupt is available; otherwise (or when `cold` is
 * set — the OpenBSD early-boot flag, presumably meaning interrupts are
 * not usable yet; confirm) polls atomically.  Logs an error if the
 * hardware never signals within the timeout.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq && !cold)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
676 :
/*
 * AUX bit-clock divider for gen4-style parts, derived from hrawclk.
 * Only a single divider is provided (index 0); further indices return
 * 0 to terminate the caller's retry loop.
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
688 :
/*
 * AUX clock divider for ILK-class parts: port A divides the CPU's
 * cdclk, the other ports divide the PCH raw clock.  Single divider
 * (index 0) only.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
705 :
/*
 * AUX clock divider for HSW/BDW: port A divides cdclk; an LPT-H PCH
 * (non-ULT HSW) needs the fixed values 63 and 72 as a workaround;
 * remaining ports divide the PCH raw clock.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
727 :
/* VLV uses a fixed AUX clock divider of 100; only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
732 :
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically).  A dummy value of 1 is
	 * returned for the first (and only) index so this still plugs into
	 * the shared get_aux_clock_divider retry loop.
	 */
	return index == 0 ? 1 : 0;
}
742 :
/*
 * Compose the AUX_CH_CTL value that starts a transaction: busy/done/
 * error bits, optional interrupt enable, timeout, message size,
 * precharge time and the 2x bit-clock divider.  Gen6 uses a shorter
 * precharge; BDW's DDI A AUX channel wants the 600us timeout.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
772 :
/*
 * SKL+ variant of get_aux_send_ctl: no clock-divider field to program;
 * uses a fixed 1600us timeout and a 32-pulse sync sequence.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
787 :
/*
 * Perform one raw AUX channel transaction: send @send_bytes from
 * @send, then read back up to @recv_size bytes into @recv.  Returns
 * the number of bytes received, or a negative errno: -EBUSY if the
 * channel never went idle or never completed, -E2BIG for oversized
 * messages (only 5 data registers = 20 bytes), -EIO on a receive
 * error, -ETIMEDOUT when the sink didn't answer.  Handles VDD, the
 * PPS lock, PM QoS latency, and the DP-spec mandated retries.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		drm_msleep(1);
	}

	if (try == 3) {
		/* only warn once per distinct stuck status value */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
939 :
/* AUX message header: 3 address/command bytes, plus 1 length byte */
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer() hook: marshal a drm_dp_aux_msg into the raw
 * byte stream intel_dp_aux_ch() expects and decode the reply.  Returns
 * the payload size on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* header: request type, 20-bit address, length-minus-one */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* zero-size messages are address-only transactions */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1011 :
/*
 * Resolve the AUX channel control register and DDC bus name for this
 * port and register the AUX channel with the DRM core.  On SKL/KBL
 * port E has no AUX channel of its own, so the VBT chooses which other
 * port's channel to borrow.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			/* Unknown/unset VBT value: fall back to channel A. */
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	/* Per-port AUX control register and bus name. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

#ifdef __linux__
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);
#endif

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		/* Non-fatal: the port simply ends up without an AUX bus. */
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

#ifdef __linux__
	/* Linux only: expose the DDC device under the connector in sysfs. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
#endif
}
1107 :
/*
 * Connector unregister hook: remove the sysfs link created by
 * intel_dp_aux_init() (Linux only; MST ports never created one),
 * then run the generic connector unregister.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
#ifdef __linux__
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
#endif
	intel_connector_unregister(intel_connector);
}
1120 :
1121 : static void
1122 0 : skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1123 : {
1124 : u32 ctrl1;
1125 :
1126 0 : memset(&pipe_config->dpll_hw_state, 0,
1127 : sizeof(pipe_config->dpll_hw_state));
1128 :
1129 0 : pipe_config->ddi_pll_sel = SKL_DPLL0;
1130 0 : pipe_config->dpll_hw_state.cfgcr1 = 0;
1131 0 : pipe_config->dpll_hw_state.cfgcr2 = 0;
1132 :
1133 : ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1134 0 : switch (pipe_config->port_clock / 2) {
1135 : case 81000:
1136 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1137 : SKL_DPLL0);
1138 0 : break;
1139 : case 135000:
1140 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1141 : SKL_DPLL0);
1142 0 : break;
1143 : case 270000:
1144 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1145 : SKL_DPLL0);
1146 0 : break;
1147 : case 162000:
1148 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1149 : SKL_DPLL0);
1150 0 : break;
1151 : /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1152 : results in CDCLK change. Need to handle the change of CDCLK by
1153 : disabling pipes and re-enabling them */
1154 : case 108000:
1155 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1156 : SKL_DPLL0);
1157 0 : break;
1158 : case 216000:
1159 : ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1160 : SKL_DPLL0);
1161 0 : break;
1162 :
1163 : }
1164 0 : pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1165 0 : }
1166 :
1167 : void
1168 0 : hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1169 : {
1170 0 : memset(&pipe_config->dpll_hw_state, 0,
1171 : sizeof(pipe_config->dpll_hw_state));
1172 :
1173 0 : switch (pipe_config->port_clock / 2) {
1174 : case 81000:
1175 0 : pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1176 0 : break;
1177 : case 135000:
1178 0 : pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1179 0 : break;
1180 : case 270000:
1181 0 : pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1182 0 : break;
1183 : }
1184 0 : }
1185 :
1186 : static int
1187 0 : intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1188 : {
1189 0 : if (intel_dp->num_sink_rates) {
1190 0 : *sink_rates = intel_dp->sink_rates;
1191 0 : return intel_dp->num_sink_rates;
1192 : }
1193 :
1194 0 : *sink_rates = default_rates;
1195 :
1196 0 : return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1197 0 : }
1198 :
1199 0 : static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1200 : {
1201 : /* WaDisableHBR2:skl */
1202 0 : if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1203 0 : return false;
1204 :
1205 0 : if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1206 0 : (INTEL_INFO(dev)->gen >= 9))
1207 0 : return true;
1208 : else
1209 0 : return false;
1210 0 : }
1211 :
1212 : static int
1213 0 : intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1214 : {
1215 : int size;
1216 :
1217 0 : if (IS_BROXTON(dev)) {
1218 0 : *source_rates = bxt_rates;
1219 : size = ARRAY_SIZE(bxt_rates);
1220 0 : } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1221 0 : *source_rates = skl_rates;
1222 : size = ARRAY_SIZE(skl_rates);
1223 0 : } else {
1224 0 : *source_rates = default_rates;
1225 : size = ARRAY_SIZE(default_rates);
1226 : }
1227 :
1228 : /* This depends on the fact that 5.4 is last value in the array */
1229 0 : if (!intel_dp_source_supports_hbr2(dev))
1230 0 : size--;
1231 :
1232 0 : return size;
1233 : }
1234 :
1235 : static void
1236 0 : intel_dp_set_clock(struct intel_encoder *encoder,
1237 : struct intel_crtc_state *pipe_config)
1238 : {
1239 0 : struct drm_device *dev = encoder->base.dev;
1240 : const struct dp_link_dpll *divisor = NULL;
1241 : int i, count = 0;
1242 :
1243 0 : if (IS_G4X(dev)) {
1244 : divisor = gen4_dpll;
1245 : count = ARRAY_SIZE(gen4_dpll);
1246 0 : } else if (HAS_PCH_SPLIT(dev)) {
1247 : divisor = pch_dpll;
1248 : count = ARRAY_SIZE(pch_dpll);
1249 0 : } else if (IS_CHERRYVIEW(dev)) {
1250 : divisor = chv_dpll;
1251 : count = ARRAY_SIZE(chv_dpll);
1252 0 : } else if (IS_VALLEYVIEW(dev)) {
1253 : divisor = vlv_dpll;
1254 : count = ARRAY_SIZE(vlv_dpll);
1255 0 : }
1256 :
1257 0 : if (divisor && count) {
1258 0 : for (i = 0; i < count; i++) {
1259 0 : if (pipe_config->port_clock == divisor[i].clock) {
1260 0 : pipe_config->dpll = divisor[i].dpll;
1261 0 : pipe_config->clock_set = true;
1262 0 : break;
1263 : }
1264 : }
1265 : }
1266 0 : }
1267 :
1268 0 : static int intersect_rates(const int *source_rates, int source_len,
1269 : const int *sink_rates, int sink_len,
1270 : int *common_rates)
1271 : {
1272 : int i = 0, j = 0, k = 0;
1273 :
1274 0 : while (i < source_len && j < sink_len) {
1275 0 : if (source_rates[i] == sink_rates[j]) {
1276 0 : if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1277 0 : return k;
1278 0 : common_rates[k] = source_rates[i];
1279 0 : ++k;
1280 0 : ++i;
1281 0 : ++j;
1282 0 : } else if (source_rates[i] < sink_rates[j]) {
1283 0 : ++i;
1284 0 : } else {
1285 0 : ++j;
1286 : }
1287 : }
1288 0 : return k;
1289 0 : }
1290 :
/*
 * Fill common_rates with the intersection of what the source can
 * drive and what the sink accepts; returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int n_src, n_snk;

	n_snk = intel_dp_sink_rates(intel_dp, &snk_rates);
	n_src = intel_dp_source_rates(dev, &src_rates);

	return intersect_rates(src_rates, n_src,
			       snk_rates, n_snk,
			       common_rates);
}
1305 :
/*
 * Format nelem integers into str as a ", "-separated list.  Stops
 * silently when the buffer fills up; str is always NUL-terminated
 * (assuming len > 0).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str, len, i ? ", %d" : "%d", array[i]);

		/* Truncated (or error): nothing more will fit. */
		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1321 :
1322 0 : static void intel_dp_print_rates(struct intel_dp *intel_dp)
1323 : {
1324 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
1325 : const int *source_rates, *sink_rates;
1326 : int source_len, sink_len, common_len;
1327 : int common_rates[DP_MAX_SUPPORTED_RATES];
1328 : char str[128]; /* FIXME: too big for stack? */
1329 :
1330 : if ((drm_debug & DRM_UT_KMS) == 0)
1331 : return;
1332 :
1333 : source_len = intel_dp_source_rates(dev, &source_rates);
1334 : snprintf_int_array(str, sizeof(str), source_rates, source_len);
1335 : DRM_DEBUG_KMS("source rates: %s\n", str);
1336 :
1337 : sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1338 : snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1339 : DRM_DEBUG_KMS("sink rates: %s\n", str);
1340 :
1341 : common_len = intel_dp_common_rates(intel_dp, common_rates);
1342 : snprintf_int_array(str, sizeof(str), common_rates, common_len);
1343 : DRM_DEBUG_KMS("common rates: %s\n", str);
1344 0 : }
1345 :
1346 0 : static int rate_to_index(int find, const int *rates)
1347 : {
1348 : int i = 0;
1349 :
1350 0 : for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1351 0 : if (find == rates[i])
1352 : break;
1353 :
1354 0 : return i;
1355 : }
1356 :
1357 : int
1358 0 : intel_dp_max_link_rate(struct intel_dp *intel_dp)
1359 : {
1360 0 : int rates[DP_MAX_SUPPORTED_RATES] = {};
1361 : int len;
1362 :
1363 0 : len = intel_dp_common_rates(intel_dp, rates);
1364 0 : if (WARN_ON(len <= 0))
1365 0 : return 162000;
1366 :
1367 0 : return rates[rate_to_index(0, rates) - 1];
1368 0 : }
1369 :
1370 0 : int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1371 : {
1372 0 : return rate_to_index(rate, intel_dp->sink_rates);
1373 : }
1374 :
1375 0 : static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1376 : uint8_t *link_bw, uint8_t *rate_select)
1377 : {
1378 0 : if (intel_dp->num_sink_rates) {
1379 0 : *link_bw = 0;
1380 0 : *rate_select =
1381 0 : intel_dp_rate_select(intel_dp, port_clock);
1382 0 : } else {
1383 0 : *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1384 0 : *rate_select = 0;
1385 : }
1386 0 : }
1387 :
1388 : bool
1389 0 : intel_dp_compute_config(struct intel_encoder *encoder,
1390 : struct intel_crtc_state *pipe_config)
1391 : {
1392 0 : struct drm_device *dev = encoder->base.dev;
1393 0 : struct drm_i915_private *dev_priv = dev->dev_private;
1394 0 : struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1395 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1396 0 : enum port port = dp_to_dig_port(intel_dp)->port;
1397 0 : struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1398 0 : struct intel_connector *intel_connector = intel_dp->attached_connector;
1399 : int lane_count, clock;
1400 : int min_lane_count = 1;
1401 0 : int max_lane_count = intel_dp_max_lane_count(intel_dp);
1402 : /* Conveniently, the link BW constants become indices with a shift...*/
1403 : int min_clock = 0;
1404 : int max_clock;
1405 : int bpp, mode_rate;
1406 : int link_avail, link_clock;
1407 0 : int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1408 : int common_len;
1409 0 : uint8_t link_bw, rate_select;
1410 :
1411 0 : common_len = intel_dp_common_rates(intel_dp, common_rates);
1412 :
1413 : /* No common link rates between source and sink */
1414 0 : WARN_ON(common_len <= 0);
1415 :
1416 0 : max_clock = common_len - 1;
1417 :
1418 0 : if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1419 0 : pipe_config->has_pch_encoder = true;
1420 :
1421 0 : pipe_config->has_dp_encoder = true;
1422 0 : pipe_config->has_drrs = false;
1423 0 : pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1424 :
1425 0 : if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1426 0 : intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1427 : adjusted_mode);
1428 :
1429 0 : if (INTEL_INFO(dev)->gen >= 9) {
1430 : int ret;
1431 0 : ret = skl_update_scaler_crtc(pipe_config);
1432 0 : if (ret)
1433 0 : return ret;
1434 0 : }
1435 :
1436 0 : if (!HAS_PCH_SPLIT(dev))
1437 0 : intel_gmch_panel_fitting(intel_crtc, pipe_config,
1438 : intel_connector->panel.fitting_mode);
1439 : else
1440 0 : intel_pch_panel_fitting(intel_crtc, pipe_config,
1441 : intel_connector->panel.fitting_mode);
1442 : }
1443 :
1444 0 : if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1445 0 : return false;
1446 :
1447 : DRM_DEBUG_KMS("DP link computation with max lane count %i "
1448 : "max bw %d pixel clock %iKHz\n",
1449 : max_lane_count, common_rates[max_clock],
1450 : adjusted_mode->crtc_clock);
1451 :
1452 : /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1453 : * bpc in between. */
1454 0 : bpp = pipe_config->pipe_bpp;
1455 0 : if (is_edp(intel_dp)) {
1456 :
1457 : /* Get bpp from vbt only for panels that dont have bpp in edid */
1458 0 : if (intel_connector->base.display_info.bpc == 0 &&
1459 0 : (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1460 : DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1461 : dev_priv->vbt.edp_bpp);
1462 : bpp = dev_priv->vbt.edp_bpp;
1463 0 : }
1464 :
1465 : /*
1466 : * Use the maximum clock and number of lanes the eDP panel
1467 : * advertizes being capable of. The panels are generally
1468 : * designed to support only a single clock and lane
1469 : * configuration, and typically these values correspond to the
1470 : * native resolution of the panel.
1471 : */
1472 : min_lane_count = max_lane_count;
1473 : min_clock = max_clock;
1474 0 : }
1475 :
1476 0 : for (; bpp >= 6*3; bpp -= 2*3) {
1477 0 : mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1478 : bpp);
1479 :
1480 0 : for (clock = min_clock; clock <= max_clock; clock++) {
1481 0 : for (lane_count = min_lane_count;
1482 0 : lane_count <= max_lane_count;
1483 0 : lane_count <<= 1) {
1484 :
1485 0 : link_clock = common_rates[clock];
1486 0 : link_avail = intel_dp_max_data_rate(link_clock,
1487 : lane_count);
1488 :
1489 0 : if (mode_rate <= link_avail) {
1490 : goto found;
1491 : }
1492 : }
1493 : }
1494 : }
1495 :
1496 0 : return false;
1497 :
1498 : found:
1499 0 : if (intel_dp->color_range_auto) {
1500 : /*
1501 : * See:
1502 : * CEA-861-E - 5.1 Default Encoding Parameters
1503 : * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1504 : */
1505 0 : pipe_config->limited_color_range =
1506 0 : bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1507 0 : } else {
1508 0 : pipe_config->limited_color_range =
1509 0 : intel_dp->limited_color_range;
1510 : }
1511 :
1512 0 : pipe_config->lane_count = lane_count;
1513 :
1514 0 : pipe_config->pipe_bpp = bpp;
1515 0 : pipe_config->port_clock = common_rates[clock];
1516 :
1517 0 : intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1518 : &link_bw, &rate_select);
1519 :
1520 : DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1521 : link_bw, rate_select, pipe_config->lane_count,
1522 : pipe_config->port_clock, bpp);
1523 : DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1524 : mode_rate, link_avail);
1525 :
1526 0 : intel_link_compute_m_n(bpp, lane_count,
1527 0 : adjusted_mode->crtc_clock,
1528 0 : pipe_config->port_clock,
1529 0 : &pipe_config->dp_m_n);
1530 :
1531 0 : if (intel_connector->panel.downclock_mode != NULL &&
1532 0 : dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1533 0 : pipe_config->has_drrs = true;
1534 0 : intel_link_compute_m_n(bpp, lane_count,
1535 0 : intel_connector->panel.downclock_mode->clock,
1536 0 : pipe_config->port_clock,
1537 0 : &pipe_config->dp_m2_n2);
1538 0 : }
1539 :
1540 0 : if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1541 0 : skl_edp_set_pll_config(pipe_config);
1542 0 : else if (IS_BROXTON(dev))
1543 : /* handled in ddi */;
1544 0 : else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1545 0 : hsw_dp_set_ddi_pll_sel(pipe_config);
1546 : else
1547 0 : intel_dp_set_clock(encoder, pipe_config);
1548 :
1549 0 : return true;
1550 0 : }
1551 :
/*
 * Program the ILK CPU eDP PLL frequency field in DP_A for the current
 * link clock, mirroring the choice into the cached intel_dp->DP value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		/* Any other rate uses the 270 MHz setting. */
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Fixed settle delay after reprogramming the PLL frequency. */
	udelay(500);
}
1582 :
1583 0 : void intel_dp_set_link_params(struct intel_dp *intel_dp,
1584 : const struct intel_crtc_state *pipe_config)
1585 : {
1586 0 : intel_dp->link_rate = pipe_config->port_clock;
1587 0 : intel_dp->lane_count = pipe_config->lane_count;
1588 0 : }
1589 :
/*
 * Build the DP port register value (intel_dp->DP) for the current
 * CRTC configuration, handling the per-platform register layouts.
 * Nothing is written to the port register here except TRANS_DP_CTL
 * on CPT.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select (bit 29). */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: enhanced framing lives in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1674 :
1675 : #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1676 : #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1677 :
1678 : #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1679 : #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1680 :
1681 : #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1682 : #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1683 :
/*
 * Poll the panel power sequencer status register until
 * (status & mask) == value, for up to 5 seconds (10 ms steps).
 * A timeout is logged as an error but not propagated.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1710 :
/* Block until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1716 :
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1722 :
/*
 * Enforce the panel's minimum power-cycle delay (off -> on), both in
 * software (relative to the last power cycle) and via the sequencer.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1734 :
/* Honor the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1740 :
/* Honor the panel's backlight-off settle delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1746 :
1747 : /* Read the current pp_control value, unlocking the register if it
1748 : * is locked
1749 : */
1750 :
1751 0 : static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1752 : {
1753 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
1754 0 : struct drm_i915_private *dev_priv = dev->dev_private;
1755 : u32 control;
1756 :
1757 : lockdep_assert_held(&dev_priv->pps_mutex);
1758 :
1759 0 : control = I915_READ(_pp_ctrl_reg(intel_dp));
1760 0 : if (!IS_BROXTON(dev)) {
1761 0 : control &= ~PANEL_UNLOCK_MASK;
1762 0 : control |= PANEL_UNLOCK_REGS;
1763 0 : }
1764 0 : return control;
1765 : }
1766 :
1767 : /*
1768 : * Must be paired with edp_panel_vdd_off().
1769 : * Must hold pps_mutex around the whole on/off sequence.
1770 : * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1771 : */
/*
 * Force panel VDD on so the AUX channel can be used.  Returns true if
 * this call actually turned VDD on (caller must balance with a VDD
 * off); false if VDD was already requested or this isn't eDP.
 * Caller must hold pps_mutex.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred VDD-off before claiming VDD. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Hardware already has VDD up: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		drm_msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1824 :
1825 : /*
1826 : * Must be paired with intel_edp_panel_vdd_off() or
1827 : * intel_edp_panel_off().
1828 : * Nested calls to these functions are not allowed since
1829 : * we drop the lock. Caller must use some higher level
1830 : * locking to prevent nested calls from other threads.
1831 : */
/*
 * Public wrapper: take pps_mutex, force VDD on, and warn if VDD had
 * already been requested (an unbalanced-nesting bug in the caller).
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	/* vdd == false means someone already held a VDD reference. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}
1846 :
/*
 * Immediately drop the VDD force bit and release the AUX power domain.
 * No-op if hardware doesn't have VDD up.  Warns if someone still wants
 * VDD.  Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Panel is now fully off: record when, so the next power-on
	 * can honor the minimum power-cycle delay. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1887 :
/*
 * Deferred VDD-off, run from the delayed work queued by
 * edp_panel_vdd_schedule_off().  Only drops VDD if nobody has
 * re-requested it in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1898 :
1899 0 : static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1900 : {
1901 : unsigned long delay;
1902 :
1903 : /*
1904 : * Queue the timer to fire a long time from now (relative to the power
1905 : * down delay) to keep the panel power up across a sequence of
1906 : * operations.
1907 : */
1908 0 : delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1909 0 : schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1910 0 : }
1911 :
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Drops the caller's VDD request.  If @sync, VDD is turned off
 * immediately; otherwise the delayed-off work is scheduled so that VDD
 * stays up across closely spaced operations.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VDD forcing only exists on eDP panels. */
	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1937 :
/*
 * Turn eDP panel power on and wait for the panel to report ready.
 * Caller must hold pps_mutex.  No-op for non-eDP ports; warns and bails
 * if panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the mandatory off->on power cycle delay first. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	/* Non-ILK keeps the reset bit set while powering on. */
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Block until the power sequencer reports the panel is on. */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1985 :
/*
 * Public wrapper around edp_panel_on(): takes the panel power sequencer
 * lock for the duration of the power-on.  No-op for non-eDP ports.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
1995 :
1996 :
/*
 * Turn eDP panel power off and wait for the sequencer to finish.
 * Caller must hold pps_mutex and should have VDD forced on (warned
 * below); the VDD force is dropped as part of the same register write.
 * Also drops the AUX power domain reference taken when VDD was enabled.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* The register write below also clears EDP_FORCE_VDD. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record cycle start, then wait for the panel to actually go off. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2038 :
/*
 * Public wrapper around edp_panel_off(): takes the panel power sequencer
 * lock for the duration of the power-off.  No-op for non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2048 :
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set EDP_BLC_ENABLE in the panel power control register. */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2078 :
2079 : /* Enable backlight PWM and backlight PP control. */
2080 0 : void intel_edp_backlight_on(struct intel_dp *intel_dp)
2081 : {
2082 0 : if (!is_edp(intel_dp))
2083 : return;
2084 :
2085 : DRM_DEBUG_KMS("\n");
2086 :
2087 0 : intel_panel_enable_backlight(intel_dp->attached_connector);
2088 0 : _intel_edp_backlight_on(intel_dp);
2089 0 : }
2090 :
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear EDP_BLC_ENABLE in the panel power control register. */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record off-time and honour the backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2117 :
2118 : /* Disable backlight PP control and backlight PWM. */
2119 0 : void intel_edp_backlight_off(struct intel_dp *intel_dp)
2120 : {
2121 0 : if (!is_edp(intel_dp))
2122 : return;
2123 :
2124 : DRM_DEBUG_KMS("\n");
2125 :
2126 0 : _intel_edp_backlight_off(intel_dp);
2127 0 : intel_panel_disable_backlight(intel_dp->attached_connector);
2128 0 : }
2129 :
2130 : /*
2131 : * Hook for controlling the panel power control backlight through the bl_power
2132 : * sysfs attribute. Take care to handle multiple calls.
2133 : */
2134 0 : static void intel_edp_backlight_power(struct intel_connector *connector,
2135 : bool enable)
2136 : {
2137 0 : struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2138 : bool is_enabled;
2139 :
2140 0 : pps_lock(intel_dp);
2141 0 : is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2142 0 : pps_unlock(intel_dp);
2143 :
2144 0 : if (is_enabled == enable)
2145 0 : return;
2146 :
2147 : DRM_DEBUG_KMS("panel power control backlight %s\n",
2148 : enable ? "enable" : "disable");
2149 :
2150 0 : if (enable)
2151 0 : _intel_edp_backlight_on(intel_dp);
2152 : else
2153 0 : _intel_edp_backlight_off(intel_dp);
2154 0 : }
2155 :
/*
 * Enable the CPU eDP PLL (DP_A register).  The attached pipe must be
 * disabled (asserted below); warns if the PLL or the port is already on.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Allow the PLL to settle before anything uses it. */
	udelay(200);
}
2181 :
/*
 * Disable the CPU eDP PLL (DP_A register).  The attached pipe must be
 * disabled; warns if the PLL is already off or the port is still on.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Allow the disable to take effect before moving on. */
	udelay(200);
}
2206 :
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Any non-on mode maps to sink power state D3. */
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			/* drm_dp_dpcd_writeb() returns 1 on success. */
			if (ret == 1)
				break;
			drm_msleep(1);
		}
	}

	/* ret is always assigned above: either by the D3 write or by the
	 * first iteration of the retry loop. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2237 :
/*
 * Read back whether this DP port is enabled in hardware; on success set
 * *pipe to the pipe driving it and return true.  Returns false when the
 * port's power domain is off or DP_PORT_EN is clear.  Pipe decoding
 * differs per platform: GEN7 port A, CPT (via TRANS_DP_CTL), CHV, and
 * the original bit layout.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Can't read registers if the power domain is off. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe->port routing lives in the transcoder regs. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	/* Note: the CPT no-pipe-found case still returns true here, with
	 * *pipe left untouched, matching long-standing behaviour. */
	return true;
}
2280 :
/*
 * Fill in @pipe_config from the current hardware state of this DP port:
 * sync polarity flags, audio enable, limited color range, lane count,
 * link M/N values, port clock (for port A) and the resulting dotclock.
 * Also applies a VBT eDP bpp fixup (see the comment near the bottom).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	/* Port A never carries audio. */
	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* On CPT the sync polarities live in the transcoder register. */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	/* Decode lane count from the port width field (0-based in hw). */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP PLL only supports 1.62 and 2.7 GHz link rates. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2368 :
/*
 * Disable the DP encoder: audio and PSR first, then backlight, sink
 * power state, and panel power (under a VDD force so the AUX channel
 * keeps working).  On pre-ILK (gen < 5) the port itself is taken down
 * here; later platforms do that in their post_disable hooks.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* On DDI platforms PSR is handled elsewhere. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2392 :
2393 0 : static void ilk_post_disable_dp(struct intel_encoder *encoder)
2394 : {
2395 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2396 0 : enum port port = dp_to_dig_port(intel_dp)->port;
2397 :
2398 0 : intel_dp_link_down(intel_dp);
2399 0 : if (port == PORT_A)
2400 0 : ironlake_edp_pll_off(intel_dp);
2401 0 : }
2402 :
2403 0 : static void vlv_post_disable_dp(struct intel_encoder *encoder)
2404 : {
2405 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2406 :
2407 0 : intel_dp_link_down(intel_dp);
2408 0 : }
2409 :
/*
 * Assert (@reset == true) or deassert the CHV DPIO data-lane and PCS
 * clock soft resets for this encoder's DPIO channel.  The second PCS
 * group (lanes 3/4) is only touched when more than two lanes are in
 * use.  All accesses go through the DPIO sideband, so the caller is
 * expected to hold sb_lock.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane resets for the first PCS group (active-low: clear = reset). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* PCS clock soft reset, first group (also active-low). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
2453 :
2454 0 : static void chv_post_disable_dp(struct intel_encoder *encoder)
2455 : {
2456 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2457 0 : struct drm_device *dev = encoder->base.dev;
2458 0 : struct drm_i915_private *dev_priv = dev->dev_private;
2459 :
2460 0 : intel_dp_link_down(intel_dp);
2461 :
2462 0 : mutex_lock(&dev_priv->sb_lock);
2463 :
2464 : /* Assert data lane reset */
2465 0 : chv_data_lane_soft_reset(encoder, true);
2466 :
2467 0 : mutex_unlock(&dev_priv->sb_lock);
2468 0 : }
2469 :
/*
 * Encode the requested link training pattern @dp_train_pat into the
 * port register value *DP.  Three register layouts are handled:
 * DDI (written straight to DP_TP_CTL here), CPT / gen7 port A, and the
 * original g4x/VLV/CHV layout.  Note: on non-DDI paths the caller is
 * responsible for writing *DP to the port register afterwards.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT hardware has no pattern 3; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports pattern 3 in this layout. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2553 :
/*
 * Enable the DP port with training pattern 1 programmed.  Note the
 * deliberate two-step write: first the register is set up WITHOUT
 * DP_PORT_EN, then a second write sets the enable bit (required on
 * VLV/CHV — see the comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2577 :
/*
 * Core DP enable path: bring up the port, power the eDP panel under a
 * temporary VDD force, wait for VLV/CHV PHY readiness, wake the sink,
 * run link training, and finally enable audio if configured.
 * Warns and bails if the port is already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV/CHV must claim a power sequencer for this pipe first. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Power the panel on with VDD forced only for the duration. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2622 :
2623 0 : static void g4x_enable_dp(struct intel_encoder *encoder)
2624 : {
2625 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2626 :
2627 0 : intel_enable_dp(encoder);
2628 0 : intel_edp_backlight_on(intel_dp);
2629 0 : }
2630 :
2631 0 : static void vlv_enable_dp(struct intel_encoder *encoder)
2632 : {
2633 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2634 :
2635 0 : intel_edp_backlight_on(intel_dp);
2636 0 : intel_psr_enable(intel_dp);
2637 0 : }
2638 :
2639 0 : static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2640 : {
2641 0 : struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2642 0 : struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2643 :
2644 0 : intel_dp_prepare(encoder);
2645 :
2646 : /* Only ilk+ has port A */
2647 0 : if (dport->port == PORT_A) {
2648 0 : ironlake_set_pll_cpu_edp(intel_dp);
2649 0 : ironlake_edp_pll_on(intel_dp);
2650 0 : }
2651 0 : }
2652 :
/*
 * Logically disconnect this port from its current VLV/CHV power
 * sequencer: turn VDD off synchronously, clear the sequencer's port
 * select register, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* Make sure VDD is off before we give up the sequencer. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2678 :
/*
 * Detach any eDP port currently using pipe @pipe's power sequencer, so
 * the caller can claim it.  Caller must hold pps_mutex.  Warns if the
 * sequencer is stolen from a port that still has an active crtc.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have power sequencers on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2715 :
/*
 * Bind this eDP port to the power sequencer of the pipe it is being
 * enabled on: detach any sequencer the port previously used, steal the
 * target pipe's sequencer from any other port, then (re)initialize the
 * sequencer state and registers.  Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already bound to the right pipe's sequencer — nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2756 :
/*
 * VLV pre-enable hook: program DPIO PCS registers for this channel via
 * the sideband, then run the common intel_enable_dp() path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the DW8 read result is immediately discarded by
	 * the `val = 0` below, making both the read and the &= branch
	 * dead.  Possibly an intentional read-before-write of the
	 * sideband register — verify against upstream i915 before
	 * touching this sequence.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2785 :
/*
 * VLV pre-PLL-enable hook: program the port register, then reset the Tx
 * lanes and apply the inter-pair skew workaround through the DPIO
 * sideband.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2815 :
/*
 * CHV: pre-enable hook for DP. Programs TX FIFO reset handling, per-lane
 * latency optimization, data-lane stagger, and deasserts the data lane
 * soft reset before finally enabling the port via intel_enable_dp().
 * The DPIO register writes below are order-sensitive.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* The second PCS group (lanes 2/3) is only used with >2 lanes */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit (cleared only for lane 1 in multi-lane configs) */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/*
	 * Data lane stagger programming: pick the stagger value from the
	 * port clock; higher link rates use larger stagger values.
	 */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
2904 :
/*
 * CHV: pre-PLL-enable hook for DP. Powers up the required PHY lanes,
 * asserts the data-lane soft reset, and programs the left/right clock
 * distribution and clock-channel usage so the PLL can be accessed and
 * enabled afterwards.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 * Remember whether we forced it on, so chv_pre_enable_dp() /
	 * chv_dp_post_pll_disable() can drop the override again.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage: pipe B uses the alternate clock channel */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	/* Same selection for the second PCS group when >2 lanes are in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2987 :
/*
 * CHV: post-PLL-disable hook for DP. Undoes the clock distribution
 * forcing done in chv_dp_pre_pll_enable() and drops the lane power
 * overrides now that the port is fully off.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3020 :
3021 : /*
3022 : * Native read with retry for link status and receiver capability reads for
3023 : * cases where the sink may still be asleep.
3024 : *
3025 : * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3026 : * supposed to retry 3 times per the spec.
3027 : */
3028 : static ssize_t
3029 0 : intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3030 : void *buffer, size_t size)
3031 : {
3032 : ssize_t ret;
3033 : int i;
3034 :
3035 : /*
3036 : * Sometime we just get the same incorrect byte repeated
3037 : * over the entire buffer. Doing just one throw away read
3038 : * initially seems to "solve" it.
3039 : */
3040 0 : drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3041 :
3042 0 : for (i = 0; i < 3; i++) {
3043 0 : ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3044 0 : if (ret == size)
3045 0 : return ret;
3046 0 : drm_msleep(1);
3047 : }
3048 :
3049 0 : return ret;
3050 0 : }
3051 :
3052 : /*
3053 : * Fetch AUX CH registers 0x202 - 0x207 which contain
3054 : * link status information
3055 : */
3056 : static bool
3057 0 : intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3058 : {
3059 0 : return intel_dp_dpcd_read_wake(&intel_dp->aux,
3060 : DP_LANE0_1_STATUS,
3061 : link_status,
3062 0 : DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3063 : }
3064 :
3065 : /* These are source-specific values. */
3066 : static uint8_t
3067 0 : intel_dp_voltage_max(struct intel_dp *intel_dp)
3068 : {
3069 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
3070 0 : struct drm_i915_private *dev_priv = dev->dev_private;
3071 0 : enum port port = dp_to_dig_port(intel_dp)->port;
3072 :
3073 0 : if (IS_BROXTON(dev))
3074 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3075 0 : else if (INTEL_INFO(dev)->gen >= 9) {
3076 0 : if (dev_priv->edp_low_vswing && port == PORT_A)
3077 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3078 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3079 0 : } else if (IS_VALLEYVIEW(dev))
3080 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3081 0 : else if (IS_GEN7(dev) && port == PORT_A)
3082 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3083 0 : else if (HAS_PCH_CPT(dev) && port != PORT_A)
3084 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3085 : else
3086 0 : return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3087 0 : }
3088 :
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing. Each platform family has its own table; in general higher
 * voltage swings allow less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* Default (gen4-6 / CPT) table; swing levels 0 and 1 share a cap */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3156 :
/*
 * VLV: translate the requested training set (voltage swing +
 * pre-emphasis) into DPIO PHY register values and program them.
 * The de-emphasis / pre-emphasis / unique transition scale constants are
 * hardware tuning values. Returns 0 (the caller masks them into the DP
 * register separately; unsupported combinations also return 0 without
 * touching the PHY).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Disable the TX lanes (DW5 = 0), program the new drive settings,
	 * then re-enable them (DW5 bit 31). Keep the write order intact.
	 */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3256 :
3257 0 : static bool chv_need_uniq_trans_scale(uint8_t train_set)
3258 : {
3259 0 : return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3260 0 : (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3261 : }
3262 :
/*
 * CHV: translate the requested training set into PHY de-emphasis and
 * swing-margin values and program them per lane. The sequence is:
 * clear calc init, clear margins, program swing/deemph per lane, set the
 * unique transition scale if needed, then start the swing calculation.
 * Returns 0 (caller handles the DP register mask separately; unsupported
 * combinations return 0 without touching the PHY).
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* Second PCS group (lanes 2/3) only when >2 lanes are in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Reset TX margins to 000 before the new swing values take effect */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3430 :
3431 : static void
3432 0 : intel_get_adjust_train(struct intel_dp *intel_dp,
3433 : const uint8_t link_status[DP_LINK_STATUS_SIZE])
3434 : {
3435 : uint8_t v = 0;
3436 : uint8_t p = 0;
3437 : int lane;
3438 : uint8_t voltage_max;
3439 : uint8_t preemph_max;
3440 :
3441 0 : for (lane = 0; lane < intel_dp->lane_count; lane++) {
3442 0 : uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3443 0 : uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3444 :
3445 0 : if (this_v > v)
3446 0 : v = this_v;
3447 0 : if (this_p > p)
3448 0 : p = this_p;
3449 : }
3450 :
3451 0 : voltage_max = intel_dp_voltage_max(intel_dp);
3452 0 : if (v >= voltage_max)
3453 0 : v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3454 :
3455 0 : preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3456 0 : if (p >= preemph_max)
3457 0 : p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3458 :
3459 0 : for (lane = 0; lane < 4; lane++)
3460 0 : intel_dp->train_set[lane] = v | p;
3461 0 : }
3462 :
3463 : static uint32_t
3464 0 : gen4_signal_levels(uint8_t train_set)
3465 : {
3466 : uint32_t signal_levels = 0;
3467 :
3468 0 : switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3469 : case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3470 : default:
3471 : signal_levels |= DP_VOLTAGE_0_4;
3472 0 : break;
3473 : case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3474 : signal_levels |= DP_VOLTAGE_0_6;
3475 0 : break;
3476 : case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3477 : signal_levels |= DP_VOLTAGE_0_8;
3478 0 : break;
3479 : case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3480 : signal_levels |= DP_VOLTAGE_1_2;
3481 0 : break;
3482 : }
3483 0 : switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3484 : case DP_TRAIN_PRE_EMPH_LEVEL_0:
3485 : default:
3486 : signal_levels |= DP_PRE_EMPHASIS_0;
3487 0 : break;
3488 : case DP_TRAIN_PRE_EMPH_LEVEL_1:
3489 0 : signal_levels |= DP_PRE_EMPHASIS_3_5;
3490 0 : break;
3491 : case DP_TRAIN_PRE_EMPH_LEVEL_2:
3492 0 : signal_levels |= DP_PRE_EMPHASIS_6;
3493 0 : break;
3494 : case DP_TRAIN_PRE_EMPH_LEVEL_3:
3495 0 : signal_levels |= DP_PRE_EMPHASIS_9_5;
3496 0 : break;
3497 : }
3498 0 : return signal_levels;
3499 : }
3500 :
3501 : /* Gen6's DP voltage swing and pre-emphasis control */
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	/* Combine both fields; the hardware groups several combos together */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported combination: log it and fall back to the lowest setting */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3528 :
3529 : /* Gen7's DP voltage swing and pre-emphasis control */
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/*
		 * Unsupported combination: fall back to 500mV/0dB. Note this
		 * default intentionally differs from all the mapped levels.
		 */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3559 :
3560 : /* Properly updates "DP" with the correct signal levels. */
3561 : static void
3562 0 : intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3563 : {
3564 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3565 0 : enum port port = intel_dig_port->port;
3566 0 : struct drm_device *dev = intel_dig_port->base.base.dev;
3567 : uint32_t signal_levels, mask = 0;
3568 0 : uint8_t train_set = intel_dp->train_set[0];
3569 :
3570 0 : if (HAS_DDI(dev)) {
3571 0 : signal_levels = ddi_signal_levels(intel_dp);
3572 :
3573 0 : if (IS_BROXTON(dev))
3574 0 : signal_levels = 0;
3575 : else
3576 : mask = DDI_BUF_EMP_MASK;
3577 0 : } else if (IS_CHERRYVIEW(dev)) {
3578 0 : signal_levels = chv_signal_levels(intel_dp);
3579 0 : } else if (IS_VALLEYVIEW(dev)) {
3580 0 : signal_levels = vlv_signal_levels(intel_dp);
3581 0 : } else if (IS_GEN7(dev) && port == PORT_A) {
3582 0 : signal_levels = gen7_edp_signal_levels(train_set);
3583 : mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3584 0 : } else if (IS_GEN6(dev) && port == PORT_A) {
3585 0 : signal_levels = gen6_edp_signal_levels(train_set);
3586 : mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3587 0 : } else {
3588 0 : signal_levels = gen4_signal_levels(train_set);
3589 : mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3590 : }
3591 :
3592 : if (mask)
3593 : DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3594 :
3595 : DRM_DEBUG_KMS("Using vswing level %d\n",
3596 : train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3597 : DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3598 : (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3599 : DP_TRAIN_PRE_EMPHASIS_SHIFT);
3600 :
3601 0 : *DP = (*DP & ~mask) | signal_levels;
3602 0 : }
3603 :
/*
 * Program the given training pattern both on the source (port register)
 * and on the sink (DPCD TRAINING_PATTERN_SET, plus TRAINING_LANEx_SET
 * unless the pattern is being disabled). Returns true when the full
 * DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	/* Single contiguous write covers pattern + per-lane settings */
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3636 :
/*
 * Reset the training set to all-zero drive settings, recompute the
 * signal levels, and program the given training pattern. Used to start
 * link training from a clean state.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3645 :
/*
 * Update drive settings mid-training: derive the new training set from
 * the sink's adjustment requests, program the source side, then write
 * the per-lane settings to the sink. Returns true when all lane bytes
 * were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3666 :
/*
 * Switch a DDI port to idle link-training mode and wait for the idle
 * pattern transmission to complete. No-op on non-DDI platforms; on
 * port A the wait is skipped (see comment below).
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3697 :
3698 : /* Enable corresponding port and start training pattern 1 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Translate the chosen link rate into BW code / rate-select index. */
	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising an explicit rate table also take a rate index. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* No downspread, 8b/10b channel coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery: start from zeroed drive levels with TPS1 */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: matches no real swing value */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Per-spec delay before polling the lane status. */
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}


		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart from scratch,
			 * giving up after 5 full restarts. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Preserve the final port register value for later stages. */
	intel_dp->DP = DP;
}
3796 :
/*
 * Second phase of link training: channel equalization using TPS2 (or
 * TPS3 when both source and sink support it).  Falls back to redoing
 * clock recovery if CR is lost, bounded by 6 CR retries total.
 */
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/*
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
	if (intel_dp_source_supports_hbr2(dev) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else if (intel_dp->link_rate == 540000)
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Hard cap on clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		/* Per-spec delay before polling the lane status. */
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			/* CR lost: redo phase 1, then resume EQ pattern. */
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	/* Send idle patterns before going to normal transmission (DDI). */
	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
3890 :
/* Leave training mode: disable the training pattern so the link carries
 * normal (scrambled) data. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3896 :
/*
 * Run the full DP link training sequence: clock recovery (TPS1) first,
 * then channel equalization (TPS2/TPS3).  Order is mandated by the DP
 * spec; each phase updates intel_dp->DP with the trained settings.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}
3903 :
/*
 * Shut down a (non-DDI) DP port: first drop to the idle training
 * pattern, then disable the port and audio.  The exact write order is
 * required by the hardware; do not reorder.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Port already off: nothing to do. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the idle-pattern encoding for this platform/port. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and audio output). */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	/* Honour the panel's power-down delay before touching it again. */
	drm_msleep(intel_dp->panel_power_down_delay);
}
3959 :
/*
 * Read and cache the sink's DPCD receiver capabilities, plus optional
 * PSR caps (eDP), the eDP 1.4 supported-link-rate table, and downstream
 * port info for branch devices.  Returns false if the sink is absent or
 * an aux transfer failed.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 additionally requires gen9+ and aux frame sync. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(dev)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Table is zero-terminated; entries are in 200 kHz units. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	/* Branch device: fetch per-port downstream capabilities. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4049 :
/*
 * Debug helper: if the sink advertises OUI support, read and log the
 * sink and branch IEEE OUIs.  Purely informational; no state changes.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
			      buf[0], buf[1], buf[2]);
}
4066 :
/*
 * Probe whether the sink supports MST (multi-stream transport) and
 * enable/disable the MST topology manager accordingly.  Requires DPCD
 * rev 1.2+ and the driver-side can_mst flag.  Returns the resulting
 * is_mst state.
 */
static bool
intel_dp_probe_mst(struct intel_dp *intel_dp)
{
	u8 buf[1];

	if (!intel_dp->can_mst)
		return false;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	/* NOTE(review): a negative aux error return is also truthy here, so
	 * buf[0] could be examined after a failed read; matches upstream
	 * code of this vintage — verify against current drm helpers. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
		if (buf[0] & DP_MST_CAP) {
			DRM_DEBUG_KMS("Sink is MST capable\n");
			intel_dp->is_mst = true;
		} else {
			DRM_DEBUG_KMS("Sink is not MST capable\n");
			intel_dp->is_mst = false;
		}
	}

	/* Propagate the decision to the topology manager. */
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	return intel_dp->is_mst;
}
4091 :
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START in the sink's
 * TEST_SINK register and re-enable IPS (disabled by the start path).
 * Returns 0 on success, -EIO on aux failure.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	/* Read-modify-write: preserve the other TEST_SINK bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	/* Always undo the IPS disable, even on the error paths. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
4117 :
/*
 * Start sink CRC calculation.  Verifies the sink supports TEST_CRC,
 * records the current CRC count, disables IPS (which would perturb the
 * CRC) and sets DP_TEST_SINK_START.  Returns 0 on success, -EIO on aux
 * failure, -ENOTTY if the sink cannot compute CRCs.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	/* Restart cleanly if a previous session is still running. */
	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Baseline count used to detect fresh CRC results later. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable on failure. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
4153 :
/*
 * Fetch a fresh 6-byte sink CRC into @crc.  Polls once per vblank, up
 * to 6 attempts, until the sink reports a new, different CRC (some
 * sinks are slow or repeat stale values).  Returns 0 on success or a
 * negative errno; always stops the CRC session before returning.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		/* Give the sink a frame to produce a new CRC. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* A repeat of count + CRC means no fresh result yet. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	/* Remember this result for the next call's staleness check. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4213 :
4214 : static bool
4215 0 : intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4216 : {
4217 0 : return intel_dp_dpcd_read_wake(&intel_dp->aux,
4218 : DP_DEVICE_SERVICE_IRQ_VECTOR,
4219 0 : sink_irq_vector, 1) == 1;
4220 : }
4221 :
4222 : static bool
4223 0 : intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4224 : {
4225 : int ret;
4226 :
4227 0 : ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4228 : DP_SINK_COUNT_ESI,
4229 : sink_irq_vector, 14);
4230 0 : if (ret != 14)
4231 0 : return false;
4232 :
4233 0 : return true;
4234 0 : }
4235 :
4236 0 : static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4237 : {
4238 : uint8_t test_result = DP_TEST_ACK;
4239 0 : return test_result;
4240 : }
4241 :
4242 0 : static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4243 : {
4244 : uint8_t test_result = DP_TEST_NAK;
4245 0 : return test_result;
4246 : }
4247 :
/*
 * DP compliance EDID-read autotest (DP CTS 1.2 Core r1.1 4.2.2.x).
 * On a clean EDID read, writes the last block's checksum back to the
 * sink and ACKs; on NAK/DEFER/corruption, requests failsafe resolution.
 * Returns the DP_TEST_* response code for TEST_RESPONSE.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 * 4.2.2.4 : Failed EDID read, I2C_NAK
		 * 4.2.2.5 : Failed EDID read, I2C_DEFER
		 * 4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}
4293 :
4294 0 : static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4295 : {
4296 : uint8_t test_result = DP_TEST_NAK;
4297 0 : return test_result;
4298 : }
4299 :
/*
 * Service a sink-initiated DP compliance test: read DP_TEST_REQUEST,
 * dispatch to the matching autotest handler, and write the handler's
 * ACK/NAK back to DP_TEST_RESPONSE.  Resets all compliance bookkeeping
 * and aux NAK/DEFER counters first.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Counters feed the EDID autotest's NAK/DEFER accounting. */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always report a response, even for unreadable/unknown requests. */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4352 :
/*
 * Handle an MST short-pulse interrupt: read the ESI block, retrain if
 * channel EQ was lost, forward the IRQ to the topology manager, ack the
 * handled ESI bits, and loop while new ESI events keep arriving.  If
 * the ESI read fails, tear down MST and signal a hotplug.  Returns the
 * topology manager's result, or -EINVAL when not in MST mode / on ESI
 * read failure.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bits; retry the
				 * 3-byte write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have queued up meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4409 :
4410 : /*
4411 : * According to DP spec
4412 : * 5.1.2:
4413 : * 1. Read DPCD
4414 : * 2. Configure link according to Receiver Capabilities
4415 : * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4416 : * 4. Check link status on receipt of hot-plug interrupt
4417 : */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex (see DP spec 5.1.2 flow). */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to check if the encoder isn't driving an active crtc. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if the link has degraded since it was last trained. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4465 :
4466 : /* XXX this is probably wrong for multiple downstream ports */
4467 : static enum drm_connector_status
4468 0 : intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4469 : {
4470 0 : uint8_t *dpcd = intel_dp->dpcd;
4471 : uint8_t type;
4472 :
4473 0 : if (!intel_dp_get_dpcd(intel_dp))
4474 0 : return connector_status_disconnected;
4475 :
4476 : /* if there's no downstream port, we're done */
4477 0 : if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4478 0 : return connector_status_connected;
4479 :
4480 : /* If we're HPD-aware, SINK_COUNT changes dynamically */
4481 0 : if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4482 0 : intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4483 0 : uint8_t reg;
4484 :
4485 0 : if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4486 0 : ®, 1) < 0)
4487 0 : return connector_status_unknown;
4488 :
4489 0 : return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4490 : : connector_status_disconnected;
4491 0 : }
4492 :
4493 : /* If no HPD, poke DDC gently */
4494 0 : if (drm_probe_ddc(&intel_dp->aux.ddc))
4495 0 : return connector_status_connected;
4496 :
4497 : /* Well we tried, say unknown for unreliable port types */
4498 0 : if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4499 0 : type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4500 0 : if (type == DP_DS_PORT_TYPE_VGA ||
4501 0 : type == DP_DS_PORT_TYPE_NON_EDID)
4502 0 : return connector_status_unknown;
4503 : } else {
4504 0 : type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4505 : DP_DWN_STRM_PORT_TYPE_MASK;
4506 0 : if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4507 0 : type == DP_DWN_STRM_PORT_TYPE_OTHER)
4508 0 : return connector_status_unknown;
4509 : }
4510 :
4511 : /* Anything else is out of spec, warn and ignore */
4512 : DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4513 0 : return connector_status_disconnected;
4514 0 : }
4515 :
4516 : static enum drm_connector_status
4517 0 : edp_detect(struct intel_dp *intel_dp)
4518 : {
4519 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
4520 : enum drm_connector_status status;
4521 :
4522 0 : status = intel_panel_detect(dev);
4523 0 : if (status == connector_status_unknown)
4524 : status = connector_status_connected;
4525 :
4526 0 : return status;
4527 : }
4528 :
4529 0 : static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4530 : struct intel_digital_port *port)
4531 : {
4532 : u32 bit;
4533 :
4534 0 : switch (port->port) {
4535 : case PORT_A:
4536 0 : return true;
4537 : case PORT_B:
4538 : bit = SDE_PORTB_HOTPLUG;
4539 0 : break;
4540 : case PORT_C:
4541 : bit = SDE_PORTC_HOTPLUG;
4542 0 : break;
4543 : case PORT_D:
4544 : bit = SDE_PORTD_HOTPLUG;
4545 0 : break;
4546 : default:
4547 0 : MISSING_CASE(port->port);
4548 0 : return false;
4549 : }
4550 :
4551 0 : return I915_READ(SDEISR) & bit;
4552 0 : }
4553 :
4554 0 : static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4555 : struct intel_digital_port *port)
4556 : {
4557 : u32 bit;
4558 :
4559 0 : switch (port->port) {
4560 : case PORT_A:
4561 0 : return true;
4562 : case PORT_B:
4563 : bit = SDE_PORTB_HOTPLUG_CPT;
4564 0 : break;
4565 : case PORT_C:
4566 : bit = SDE_PORTC_HOTPLUG_CPT;
4567 0 : break;
4568 : case PORT_D:
4569 : bit = SDE_PORTD_HOTPLUG_CPT;
4570 0 : break;
4571 : case PORT_E:
4572 : bit = SDE_PORTE_HOTPLUG_SPT;
4573 0 : break;
4574 : default:
4575 0 : MISSING_CASE(port->port);
4576 0 : return false;
4577 : }
4578 :
4579 0 : return I915_READ(SDEISR) & bit;
4580 0 : }
4581 :
4582 0 : static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4583 : struct intel_digital_port *port)
4584 : {
4585 : u32 bit;
4586 :
4587 0 : switch (port->port) {
4588 : case PORT_B:
4589 : bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4590 0 : break;
4591 : case PORT_C:
4592 : bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4593 0 : break;
4594 : case PORT_D:
4595 : bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4596 0 : break;
4597 : default:
4598 0 : MISSING_CASE(port->port);
4599 0 : return false;
4600 : }
4601 :
4602 0 : return I915_READ(PORT_HOTPLUG_STAT) & bit;
4603 0 : }
4604 :
4605 0 : static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4606 : struct intel_digital_port *port)
4607 : {
4608 : u32 bit;
4609 :
4610 0 : switch (port->port) {
4611 : case PORT_B:
4612 : bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4613 0 : break;
4614 : case PORT_C:
4615 : bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4616 0 : break;
4617 : case PORT_D:
4618 : bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4619 0 : break;
4620 : default:
4621 0 : MISSING_CASE(port->port);
4622 0 : return false;
4623 : }
4624 :
4625 0 : return I915_READ(PORT_HOTPLUG_STAT) & bit;
4626 0 : }
4627 :
4628 0 : static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4629 : struct intel_digital_port *intel_dig_port)
4630 : {
4631 0 : struct intel_encoder *intel_encoder = &intel_dig_port->base;
4632 0 : enum port port;
4633 : u32 bit;
4634 :
4635 0 : intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4636 0 : switch (port) {
4637 : case PORT_A:
4638 : bit = BXT_DE_PORT_HP_DDIA;
4639 0 : break;
4640 : case PORT_B:
4641 : bit = BXT_DE_PORT_HP_DDIB;
4642 0 : break;
4643 : case PORT_C:
4644 : bit = BXT_DE_PORT_HP_DDIC;
4645 0 : break;
4646 : default:
4647 0 : MISSING_CASE(port);
4648 0 : return false;
4649 : }
4650 :
4651 0 : return I915_READ(GEN8_DE_PORT_ISR) & bit;
4652 0 : }
4653 :
4654 : /*
4655 : * intel_digital_port_connected - is the specified port connected?
4656 : * @dev_priv: i915 private structure
4657 : * @port: the port to test
4658 : *
4659 : * Return %true if @port is connected, %false otherwise.
4660 : */
4661 0 : static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4662 : struct intel_digital_port *port)
4663 : {
4664 0 : if (HAS_PCH_IBX(dev_priv))
4665 0 : return ibx_digital_port_connected(dev_priv, port);
4666 0 : if (HAS_PCH_SPLIT(dev_priv))
4667 0 : return cpt_digital_port_connected(dev_priv, port);
4668 0 : else if (IS_BROXTON(dev_priv))
4669 0 : return bxt_digital_port_connected(dev_priv, port);
4670 0 : else if (IS_GM45(dev_priv))
4671 0 : return gm45_digital_port_connected(dev_priv, port);
4672 : else
4673 0 : return g4x_digital_port_connected(dev_priv, port);
4674 0 : }
4675 :
4676 : static enum drm_connector_status
4677 0 : ironlake_dp_detect(struct intel_dp *intel_dp)
4678 : {
4679 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
4680 0 : struct drm_i915_private *dev_priv = dev->dev_private;
4681 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4682 :
4683 0 : if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4684 0 : return connector_status_disconnected;
4685 :
4686 0 : return intel_dp_detect_dpcd(intel_dp);
4687 0 : }
4688 :
4689 : static enum drm_connector_status
4690 0 : g4x_dp_detect(struct intel_dp *intel_dp)
4691 : {
4692 0 : struct drm_device *dev = intel_dp_to_dev(intel_dp);
4693 0 : struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4694 :
4695 : /* Can't disconnect eDP, but you can close the lid... */
4696 0 : if (is_edp(intel_dp)) {
4697 : enum drm_connector_status status;
4698 :
4699 0 : status = intel_panel_detect(dev);
4700 0 : if (status == connector_status_unknown)
4701 : status = connector_status_connected;
4702 : return status;
4703 : }
4704 :
4705 0 : if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4706 0 : return connector_status_disconnected;
4707 :
4708 0 : return intel_dp_detect_dpcd(intel_dp);
4709 0 : }
4710 :
4711 : static struct edid *
4712 0 : intel_dp_get_edid(struct intel_dp *intel_dp)
4713 : {
4714 0 : struct intel_connector *intel_connector = intel_dp->attached_connector;
4715 :
4716 : /* use cached edid if we have one */
4717 0 : if (intel_connector->edid) {
4718 : /* invalid edid */
4719 0 : if (IS_ERR(intel_connector->edid))
4720 0 : return NULL;
4721 :
4722 0 : return drm_edid_duplicate(intel_connector->edid);
4723 : } else
4724 0 : return drm_get_edid(&intel_connector->base,
4725 0 : &intel_dp->aux.ddc);
4726 0 : }
4727 :
4728 : static void
4729 0 : intel_dp_set_edid(struct intel_dp *intel_dp)
4730 : {
4731 0 : struct intel_connector *intel_connector = intel_dp->attached_connector;
4732 : struct edid *edid;
4733 :
4734 0 : edid = intel_dp_get_edid(intel_dp);
4735 0 : intel_connector->detect_edid = edid;
4736 :
4737 0 : if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4738 0 : intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4739 : else
4740 0 : intel_dp->has_audio = drm_detect_monitor_audio(edid);
4741 0 : }
4742 :
4743 : static void
4744 0 : intel_dp_unset_edid(struct intel_dp *intel_dp)
4745 : {
4746 0 : struct intel_connector *intel_connector = intel_dp->attached_connector;
4747 :
4748 0 : kfree(intel_connector->detect_edid);
4749 0 : intel_connector->detect_edid = NULL;
4750 :
4751 0 : intel_dp->has_audio = false;
4752 0 : }
4753 :
/*
 * Connector .detect hook: probe whether a DP sink is present.
 *
 * Holds an AUX power domain reference for the duration of the probe.
 * MST connectors always report disconnected here (their state is
 * tracked per-stream elsewhere). On a connected SST sink this also
 * refreshes the cached EDID and services any pending sink IRQs.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any EDID from a previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transfers below need the port's AUX power domain powered. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4826 :
/*
 * Connector .force hook: refresh cached state without a full detect.
 *
 * Re-reads the EDID for a connector the user has forced connected,
 * taking the AUX power domain only around the EDID read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Only forced-connected connectors need a fresh EDID. */
	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4852 :
4853 0 : static int intel_dp_get_modes(struct drm_connector *connector)
4854 : {
4855 0 : struct intel_connector *intel_connector = to_intel_connector(connector);
4856 : struct edid *edid;
4857 :
4858 0 : edid = intel_connector->detect_edid;
4859 0 : if (edid) {
4860 0 : int ret = intel_connector_update_modes(connector, edid);
4861 0 : if (ret)
4862 0 : return ret;
4863 0 : }
4864 :
4865 : /* if eDP has no EDID, fall back to fixed mode */
4866 0 : if (is_edp(intel_attached_dp(connector)) &&
4867 0 : intel_connector->panel.fixed_mode) {
4868 : struct drm_display_mode *mode;
4869 :
4870 0 : mode = drm_mode_duplicate(connector->dev,
4871 : intel_connector->panel.fixed_mode);
4872 0 : if (mode) {
4873 0 : drm_mode_probed_add(connector, mode);
4874 0 : return 1;
4875 : }
4876 0 : }
4877 :
4878 0 : return 0;
4879 0 : }
4880 :
4881 : static bool
4882 0 : intel_dp_detect_audio(struct drm_connector *connector)
4883 : {
4884 : bool has_audio = false;
4885 : struct edid *edid;
4886 :
4887 0 : edid = to_intel_connector(connector)->detect_edid;
4888 0 : if (edid)
4889 0 : has_audio = drm_detect_monitor_audio(edid);
4890 :
4891 0 : return has_audio;
4892 : }
4893 :
/*
 * Connector .set_property hook for the DP-specific properties:
 * force-audio, broadcast RGB range, and (eDP only) panel scaling mode.
 *
 * Stores the new value, and if it actually changed the effective
 * configuration, restores the mode on the attached CRTC so the change
 * takes effect. Returns 0 on success or -EINVAL for unknown
 * properties/values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Effective audio state unchanged: no modeset needed. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the mode restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4981 :
/*
 * Connector .destroy hook: release everything the connector owns.
 *
 * Frees both the detect-cycle EDID and the long-term cached EDID
 * (which may be NULL or an ERR_PTR marker), tears down the panel for
 * eDP, and finally frees the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The cached EDID may be an ERR_PTR marker; only free real buffers. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
5000 :
/*
 * Encoder .destroy hook: unregister the AUX channel, tear down MST
 * state, force panel VDD off for eDP, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5026 :
/*
 * Suspend hook for DP encoders: for eDP, cancel any pending delayed
 * VDD-off work and synchronously turn panel VDD off before suspend.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5043 :
/*
 * Reconcile software VDD tracking with hardware state at boot/resume.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5068 :
/*
 * Encoder .reset hook: re-read hardware state the BIOS may have
 * changed (output register, power sequencer assignment) and sanitize
 * the eDP VDD tracking.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* On non-DDI platforms the DP register is cached in software. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	/* The power sequencer work below only applies to eDP. */
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5093 :
/* DRM connector operations for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};

/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* DRM encoder operations for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5116 :
/*
 * Handle a hot-plug pulse on a DP digital port.
 *
 * @long_hpd selects between full re-detection (long pulse) and
 * link/MST status servicing (short pulse). Long pulses on eDP are
 * ignored to avoid a VDD-off/HPD feedback loop. If an MST sink has
 * disappeared, MST mode is torn down via the mst_fail path.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* AUX accesses below require the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* Sink gone or unresponsive: fall back out of MST. */
		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp)) {
			/* SST sink: just (re)train the link. */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5194 :
5195 : /* Return which DP Port should be selected for Transcoder DP control */
5196 : int
5197 0 : intel_trans_dp_port_sel(struct drm_crtc *crtc)
5198 : {
5199 0 : struct drm_device *dev = crtc->dev;
5200 : struct intel_encoder *intel_encoder;
5201 : struct intel_dp *intel_dp;
5202 :
5203 0 : for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5204 0 : intel_dp = enc_to_intel_dp(&intel_encoder->base);
5205 :
5206 0 : if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5207 0 : intel_encoder->type == INTEL_OUTPUT_EDP)
5208 0 : return intel_dp->output_reg;
5209 : }
5210 :
5211 0 : return -1;
5212 0 : }
5213 :
5214 : /* check the VBT to see whether the eDP is on another port */
5215 0 : bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5216 : {
5217 0 : struct drm_i915_private *dev_priv = dev->dev_private;
5218 : union child_device_config *p_child;
5219 : int i;
5220 : static const short port_mapping[] = {
5221 : [PORT_B] = DVO_PORT_DPB,
5222 : [PORT_C] = DVO_PORT_DPC,
5223 : [PORT_D] = DVO_PORT_DPD,
5224 : [PORT_E] = DVO_PORT_DPE,
5225 : };
5226 :
5227 : /*
5228 : * eDP not supported on g4x. so bail out early just
5229 : * for a bit extra safety in case the VBT is bonkers.
5230 : */
5231 0 : if (INTEL_INFO(dev)->gen < 5)
5232 0 : return false;
5233 :
5234 0 : if (port == PORT_A)
5235 0 : return true;
5236 :
5237 0 : if (!dev_priv->vbt.child_dev_num)
5238 0 : return false;
5239 :
5240 0 : for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5241 0 : p_child = dev_priv->vbt.child_dev + i;
5242 :
5243 0 : if (p_child->common.dvo_port == port_mapping[port] &&
5244 0 : (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5245 : (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5246 0 : return true;
5247 : }
5248 0 : return false;
5249 0 : }
5250 :
/*
 * Attach the DP connector properties: force-audio, broadcast RGB, and
 * (for eDP) the panel scaling mode, defaulting to aspect-preserving.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5269 :
/*
 * Stamp the panel power-sequencing timestamps with "now" so the first
 * real transitions respect the mandated panel delays.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5276 :
/*
 * Compute the eDP panel power-sequencing delays.
 *
 * Reads the current hardware register values and the VBT, takes the
 * max of the two per field, and falls back to the eDP spec limits if
 * both are zero. Results are cached in intel_dp->pps_delays (hw units
 * of 100us) and the per-transition delay fields (units of ms).
 * Caller must hold pps_mutex. Idempotent: returns early once
 * t11_t12 has been filled in.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT keeps the cycle delay in PP_CONTROL, 1-based. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hw units (100us) to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5399 :
/*
 * Program the panel power-sequencer hardware registers from the
 * delays computed by intel_dp_init_panel_power_sequencer().
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: the cycle delay lives in PP_CONTROL, 1-based. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5487 :
5488 : /**
5489 : * intel_dp_set_drrs_state - program registers for RR switch to take effect
5490 : * @dev: DRM device
5491 : * @refresh_rate: RR to be programmed
5492 : *
5493 : * This function gets called when refresh rate (RR) has to be changed from
5494 : * one frequency to another. Switches can be between high and low RR
5495 : * supported by the panel or to any other RR based on media playback (in
5496 : * this case, RR value needs to be passed from user space).
5497 : *
5498 : * The caller of this function needs to take a lock on dev_priv->drrs.
5499 : */
5500 0 : static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5501 : {
5502 0 : struct drm_i915_private *dev_priv = dev->dev_private;
5503 : struct intel_encoder *encoder;
5504 : struct intel_digital_port *dig_port = NULL;
5505 0 : struct intel_dp *intel_dp = dev_priv->drrs.dp;
5506 : struct intel_crtc_state *config = NULL;
5507 : struct intel_crtc *intel_crtc = NULL;
5508 : enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5509 :
5510 0 : if (refresh_rate <= 0) {
5511 : DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5512 0 : return;
5513 : }
5514 :
5515 0 : if (intel_dp == NULL) {
5516 : DRM_DEBUG_KMS("DRRS not supported.\n");
5517 0 : return;
5518 : }
5519 :
5520 : /*
5521 : * FIXME: This needs proper synchronization with psr state for some
5522 : * platforms that cannot have PSR and DRRS enabled at the same time.
5523 : */
5524 :
5525 0 : dig_port = dp_to_dig_port(intel_dp);
5526 0 : encoder = &dig_port->base;
5527 0 : intel_crtc = to_intel_crtc(encoder->base.crtc);
5528 :
5529 0 : if (!intel_crtc) {
5530 : DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5531 0 : return;
5532 : }
5533 :
5534 0 : config = intel_crtc->config;
5535 :
5536 0 : if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5537 : DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5538 0 : return;
5539 : }
5540 :
5541 0 : if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5542 : refresh_rate)
5543 0 : index = DRRS_LOW_RR;
5544 :
5545 0 : if (index == dev_priv->drrs.refresh_rate_type) {
5546 : DRM_DEBUG_KMS(
5547 : "DRRS requested for previously set RR...ignoring\n");
5548 0 : return;
5549 : }
5550 :
5551 0 : if (!intel_crtc->active) {
5552 : DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5553 0 : return;
5554 : }
5555 :
5556 0 : if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5557 0 : switch (index) {
5558 : case DRRS_HIGH_RR:
5559 0 : intel_dp_set_m_n(intel_crtc, M1_N1);
5560 0 : break;
5561 : case DRRS_LOW_RR:
5562 0 : intel_dp_set_m_n(intel_crtc, M2_N2);
5563 0 : break;
5564 : case DRRS_MAX_RR:
5565 : default:
5566 0 : DRM_ERROR("Unsupported refreshrate type\n");
5567 0 : }
5568 0 : } else if (INTEL_INFO(dev)->gen > 6) {
5569 0 : u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5570 : u32 val;
5571 :
5572 0 : val = I915_READ(reg);
5573 0 : if (index > DRRS_HIGH_RR) {
5574 0 : if (IS_VALLEYVIEW(dev))
5575 0 : val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5576 : else
5577 0 : val |= PIPECONF_EDP_RR_MODE_SWITCH;
5578 : } else {
5579 0 : if (IS_VALLEYVIEW(dev))
5580 0 : val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5581 : else
5582 0 : val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5583 : }
5584 0 : I915_WRITE(reg, val);
5585 0 : }
5586 :
5587 0 : dev_priv->drrs.refresh_rate_type = index;
5588 :
5589 : DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5590 0 : }
5591 :
5592 : /**
5593 : * intel_edp_drrs_enable - init drrs struct if supported
5594 : * @intel_dp: DP struct
5595 : *
5596 : * Initializes frontbuffer_bits and drrs.dp
5597 : */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* The pipe config must have advertised DRRS support. */
	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one eDP panel may own the global DRRS state at a time. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	/* Start with no planes marked busy; frontbuffer tracking will
	 * set bits via intel_edp_drrs_invalidate()/flush(). */
	dev_priv->drrs.busy_frontbuffer_bits = 0;

	/* Publishing drrs.dp marks DRRS as enabled for this panel. */
	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5624 :
5625 : /**
5626 : * intel_edp_drrs_disable - Disable DRRS
5627 : * @intel_dp: DP struct
5628 : *
5629 : */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Nothing to tear down if the config never had DRRS. */
	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	/* DRRS was never enabled (or is already disabled). */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* If currently downclocked, restore the panel's fixed (high)
	 * refresh rate before detaching. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Cancel after dropping the mutex: the downclock work itself
	 * takes drrs.mutex, so a sync cancel under it could deadlock. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5657 :
/* Delayed work: after a period with no frontbuffer activity (scheduled
 * from intel_edp_drrs_flush()), drop the eDP panel to its downclocked
 * (low) refresh rate to save power. */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled after this work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	/* Switch to the downclocked refresh rate unless we are already
	 * running at the low rate. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5687 :
5688 : /**
5689 : * intel_edp_drrs_invalidate - Disable Idleness DRRS
5690 : * @dev: DRM device
5691 : * @frontbuffer_bits: frontbuffer plane tracking bits
5692 : *
 * This function gets called every time rendering on the given planes starts.
5694 : * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5695 : *
5696 : * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5697 : */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* New activity makes any pending downclock stale. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	/* DRRS not (or no longer) enabled on any panel. */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe matter. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5730 :
5731 : /**
5732 : * intel_edp_drrs_flush - Restart Idleness DRRS
5733 : * @dev: DRM device
5734 : * @frontbuffer_bits: frontbuffer plane tracking bits
5735 : *
5736 : * This function gets called every time rendering on the given planes has
5737 : * completed or flip on a crtc is completed. So DRRS should be upclocked
5738 : * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5739 : * if no other planes are dirty.
5740 : *
5741 : * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5742 : */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Any previously scheduled downclock is rescheduled below once
	 * we know whether all planes have gone quiet. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	/* DRRS not (or no longer) enabled on any panel. */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe matter;
	 * flushed planes are no longer busy. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5782 :
5783 : /**
5784 : * DOC: Display Refresh Rate Switching (DRRS)
5785 : *
5786 : * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5788 : * dynamically, based on the usage scenario. This feature is applicable
5789 : * for internal panels.
5790 : *
5791 : * Indication that the panel supports DRRS is given by the panel EDID, which
5792 : * would list multiple refresh rates for one resolution.
5793 : *
5794 : * DRRS is of 2 types - static and seamless.
5795 : * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5796 : * (may appear as a blink on screen) and is used in dock-undock scenario.
5797 : * Seamless DRRS involves changing RR without any visual effect to the user
5798 : * and can be used during normal system usage. This is done by programming
5799 : * certain registers.
5800 : *
5801 : * Support for static/seamless DRRS may be indicated in the VBT based on
5802 : * inputs from the panel spec.
5803 : *
5804 : * DRRS saves power by switching to low RR based on usage scenarios.
5805 : *
5806 : * eDP DRRS:-
5807 : * The implementation is based on frontbuffer tracking implementation.
5808 : * When there is a disturbance on the screen triggered by user activity or a
5809 : * periodic system activity, DRRS is disabled (RR is changed to high RR).
5810 : * When there is no movement on screen, after a timeout of 1 second, a switch
5811 : * to low RR is made.
5812 : * For integration with frontbuffer tracking code,
5813 : * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5814 : *
5815 : * DRRS can be further extended to support other internal panels and also
5816 : * the scenario of video playback wherein RR is set based on the rate
5817 : * requested by userspace.
5818 : */
5819 :
5820 : /**
5821 : * intel_dp_drrs_init - Init basic DRRS work and mutex.
5822 : * @intel_connector: eDP connector
5823 : * @fixed_mode: preferred mode of panel
5824 : *
5825 : * This function is called only once at driver load to initialize basic
5826 : * DRRS stuff.
5827 : *
5828 : * Returns:
5829 : * Downclock mode if panel supports it, else return NULL.
5830 : * DRRS support is determined by the presence of downclock mode (apart
5831 : * from VBT setting).
5832 : */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are initialized unconditionally, even when DRRS
	 * turns out to be unsupported, so the enable/disable/flush paths
	 * can always rely on them. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	rw_init(&dev_priv->drrs.mutex,"drrs");

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	/* VBT must explicitly opt in to seamless DRRS. */
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* Look for a second, lower refresh rate for the panel's fixed
	 * mode among the probed modes. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panels come up at the fixed (high) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5869 :
/* One-time eDP connector setup: sanitize VDD, cache DPCD and EDID,
 * pick the panel's fixed (and optional downclock) mode, and set up
 * backlight control. Returns false if the eDP device looks like a
 * ghost (no DPCD), true otherwise. Non-eDP connectors are a no-op. */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but yielded no modes: record an
			 * error pointer instead of a useless blob. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Ownership of the EDID (or ERR_PTR) moves to the connector. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5976 :
/* Finish initializing a DP/eDP connector on the given digital port:
 * select per-platform AUX vfuncs, register the DRM connector, wire up
 * the hotplug pin, set up panel power sequencing for eDP, init MST
 * where supported, and run the eDP probe. Returns false (and fully
 * tears the connector back down) if the eDP probe fails. */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* Broxton A1 routes port B hotplug through pin A. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* Panel power sequencing must be set up before any AUX traffic. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* eDP probe failed: unwind everything set up above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
6125 :
/* Allocate and register a DP encoder + connector for @port, wiring up
 * the per-platform enable/disable vfuncs. Returns true on success;
 * on failure all allocations are unwound and false is returned. */
bool intel_dp_init(struct drm_device *dev,
		   int output_reg,
		   enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific enable sequences (CHV/VLV need PLL hooks). */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D can only drive pipe C; B/C drive pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

	/* goto-based unwind: free only what was successfully set up. */
err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
6202 :
6203 0 : void intel_dp_mst_suspend(struct drm_device *dev)
6204 : {
6205 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6206 : int i;
6207 :
6208 : /* disable MST */
6209 0 : for (i = 0; i < I915_MAX_PORTS; i++) {
6210 0 : struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6211 0 : if (!intel_dig_port)
6212 0 : continue;
6213 :
6214 0 : if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6215 0 : if (!intel_dig_port->dp.can_mst)
6216 0 : continue;
6217 0 : if (intel_dig_port->dp.is_mst)
6218 0 : drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6219 : }
6220 0 : }
6221 0 : }
6222 :
6223 0 : void intel_dp_mst_resume(struct drm_device *dev)
6224 : {
6225 0 : struct drm_i915_private *dev_priv = dev->dev_private;
6226 : int i;
6227 :
6228 0 : for (i = 0; i < I915_MAX_PORTS; i++) {
6229 0 : struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6230 0 : if (!intel_dig_port)
6231 0 : continue;
6232 0 : if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6233 : int ret;
6234 :
6235 0 : if (!intel_dig_port->dp.can_mst)
6236 0 : continue;
6237 :
6238 0 : ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6239 0 : if (ret != 0) {
6240 0 : intel_dp_check_mst_status(&intel_dig_port->dp);
6241 0 : }
6242 0 : }
6243 0 : }
6244 0 : }
|