Line data Source code
1 : /*
2 : * Copyright 2008 Advanced Micro Devices, Inc.
3 : * Copyright 2008 Red Hat Inc.
4 : * Copyright 2009 Jerome Glisse.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the "Software"),
8 : * to deal in the Software without restriction, including without limitation
9 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 : * and/or sell copies of the Software, and to permit persons to whom the
11 : * Software is furnished to do so, subject to the following conditions:
12 : *
13 : * The above copyright notice and this permission notice shall be included in
14 : * all copies or substantial portions of the Software.
15 : *
16 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 : * OTHER DEALINGS IN THE SOFTWARE.
23 : *
24 : * Authors: Dave Airlie
25 : * Alex Deucher
26 : * Jerome Glisse
27 : */
28 : #include <dev/pci/drm/drmP.h>
29 : #include <dev/pci/drm/radeon_drm.h>
30 : #include "radeon.h"
31 : #include "radeon_asic.h"
32 : #include "radeon_audio.h"
33 : #include "radeon_mode.h"
34 : #include "r600d.h"
35 : #include "atom.h"
36 : #include "avivod.h"
37 : #include "radeon_ucode.h"
38 :
39 : /* Firmware Names */
40 : MODULE_FIRMWARE("radeon/R600_pfp.bin");
41 : MODULE_FIRMWARE("radeon/R600_me.bin");
42 : MODULE_FIRMWARE("radeon/RV610_pfp.bin");
43 : MODULE_FIRMWARE("radeon/RV610_me.bin");
44 : MODULE_FIRMWARE("radeon/RV630_pfp.bin");
45 : MODULE_FIRMWARE("radeon/RV630_me.bin");
46 : MODULE_FIRMWARE("radeon/RV620_pfp.bin");
47 : MODULE_FIRMWARE("radeon/RV620_me.bin");
48 : MODULE_FIRMWARE("radeon/RV635_pfp.bin");
49 : MODULE_FIRMWARE("radeon/RV635_me.bin");
50 : MODULE_FIRMWARE("radeon/RV670_pfp.bin");
51 : MODULE_FIRMWARE("radeon/RV670_me.bin");
52 : MODULE_FIRMWARE("radeon/RS780_pfp.bin");
53 : MODULE_FIRMWARE("radeon/RS780_me.bin");
54 : MODULE_FIRMWARE("radeon/RV770_pfp.bin");
55 : MODULE_FIRMWARE("radeon/RV770_me.bin");
56 : MODULE_FIRMWARE("radeon/RV770_smc.bin");
57 : MODULE_FIRMWARE("radeon/RV730_pfp.bin");
58 : MODULE_FIRMWARE("radeon/RV730_me.bin");
59 : MODULE_FIRMWARE("radeon/RV730_smc.bin");
60 : MODULE_FIRMWARE("radeon/RV740_smc.bin");
61 : MODULE_FIRMWARE("radeon/RV710_pfp.bin");
62 : MODULE_FIRMWARE("radeon/RV710_me.bin");
63 : MODULE_FIRMWARE("radeon/RV710_smc.bin");
64 : MODULE_FIRMWARE("radeon/R600_rlc.bin");
65 : MODULE_FIRMWARE("radeon/R700_rlc.bin");
66 : MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
67 : MODULE_FIRMWARE("radeon/CEDAR_me.bin");
68 : MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
69 : MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
70 : MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
71 : MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
72 : MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
73 : MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
74 : MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
75 : MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
76 : MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
77 : MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
78 : MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
79 : MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
80 : MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
81 : MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
82 : MODULE_FIRMWARE("radeon/PALM_pfp.bin");
83 : MODULE_FIRMWARE("radeon/PALM_me.bin");
84 : MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
85 : MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
86 : MODULE_FIRMWARE("radeon/SUMO_me.bin");
87 : MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
88 : MODULE_FIRMWARE("radeon/SUMO2_me.bin");
89 :
90 : static const u32 crtc_offsets[2] =
91 : {
92 : 0,
93 : AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
94 : };
95 :
96 : int r600_debugfs_mc_info_init(struct radeon_device *rdev);
97 :
98 : /* r600,rv610,rv630,rv620,rv635,rv670 */
99 : int r600_mc_wait_for_idle(struct radeon_device *rdev);
100 : static void r600_gpu_init(struct radeon_device *rdev);
101 : void r600_fini(struct radeon_device *rdev);
102 : void r600_irq_disable(struct radeon_device *rdev);
103 : static void r600_pcie_gen2_enable(struct radeon_device *rdev);
104 : extern int evergreen_rlc_resume(struct radeon_device *rdev);
105 : extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
106 :
107 : /*
108 : * Indirect registers accessor
109 : */
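 : /* Each accessor below takes the matching index spinlock, programs the
 : * INDEX register with the (masked) register offset and then reads or
 : * writes the DATA register while the lock is held.
 : */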
110 0 : u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
111 : {
112 : unsigned long flags;
113 : u32 r;
114 :
115 0 : spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
116 0 : WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
117 0 : r = RREG32(R600_RCU_DATA);
118 0 : spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
119 0 : return r;
120 : }
121 :
122 0 : void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
123 : {
124 : unsigned long flags;
125 :
126 0 : spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
127 0 : WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
128 0 : WREG32(R600_RCU_DATA, (v));
129 0 : spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
130 0 : }
131 :
132 0 : u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
133 : {
134 : unsigned long flags;
135 : u32 r;
136 :
137 0 : spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
138 0 : WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
139 0 : r = RREG32(R600_UVD_CTX_DATA);
140 0 : spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
141 0 : return r;
142 : }
143 :
144 0 : void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
145 : {
146 : unsigned long flags;
147 :
148 0 : spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
149 0 : WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
150 0 : WREG32(R600_UVD_CTX_DATA, (v));
151 0 : spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
152 0 : }
153 :
154 : /**
155 : * r600_get_allowed_info_register - fetch the register for the info ioctl
156 : *
157 : * @rdev: radeon_device pointer
158 : * @reg: register offset in bytes
159 : * @val: register value
160 : *
161 : * Returns 0 for success or -EINVAL for an invalid register
162 : *
163 : */
164 0 : int r600_get_allowed_info_register(struct radeon_device *rdev,
165 : u32 reg, u32 *val)
166 : {
167 0 : switch (reg) {
168 : case GRBM_STATUS:
169 : case GRBM_STATUS2:
170 : case R_000E50_SRBM_STATUS:
171 : case DMA_STATUS_REG:
172 : case UVD_STATUS:
173 0 : *val = RREG32(reg);
174 0 : return 0;
175 : default:
176 0 : return -EINVAL;
177 : }
178 0 : }
179 :
180 : /**
181 : * r600_get_xclk - get the xclk
182 : *
183 : * @rdev: radeon_device pointer
184 : *
185 : * Returns the reference clock used by the gfx engine
186 : * (r6xx, IGPs, APUs).
187 : */
188 0 : u32 r600_get_xclk(struct radeon_device *rdev)
189 : {
190 0 : return rdev->clock.spll.reference_freq;
191 : }
192 :
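 : /**
 : * r600_set_uvd_clocks - program the UPLL for the requested UVD clocks
 : *
 : * @rdev: radeon_device pointer
 : * @vclk: requested UVD video clock (0 keeps the PLL in bypass and sleep)
 : * @dclk: requested UVD decode clock (0 keeps the PLL in bypass and sleep)
 : *
 : * Switches VCLK/DCLK to the bypass source, recomputes and programs the
 : * UPLL feedback, reference and post dividers, waits for the PLL to
 : * settle and then switches the clocks back to the PLL outputs.
 : * Returns 0 on success, a negative error code on failure.
 : */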
193 0 : int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
194 : {
195 0 : unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
196 : int r;
197 :
198 : /* bypass vclk and dclk with bclk */
199 0 : WREG32_P(CG_UPLL_FUNC_CNTL_2,
200 : VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
201 : ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
202 :
203 : /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
204 0 : WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
205 : UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
206 :
207 0 : if (rdev->family >= CHIP_RS780)
208 0 : WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
209 : ~UPLL_BYPASS_CNTL);
210 :
211 0 : if (!vclk || !dclk) {
212 : /* keep bypass mode, put the PLL to sleep */
213 0 : WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
214 0 : return 0;
215 : }
216 :
217 0 : if (rdev->clock.spll.reference_freq == 10000)
218 0 : ref_div = 34;
219 : else
220 : ref_div = 4;
221 :
222 0 : r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
223 0 : ref_div + 1, 0xFFF, 2, 30, ~0,
224 : &fb_div, &vclk_div, &dclk_div);
225 0 : if (r)
226 0 : return r;
227 :
228 0 : if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
229 0 : fb_div >>= 1;
230 : else
231 0 : fb_div |= 1;
232 :
233 0 : r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
234 0 : if (r)
235 0 : return r;
236 :
237 : /* assert PLL_RESET */
238 0 : WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
239 :
240 : /* For RS780 we have to choose ref clk */
241 0 : if (rdev->family >= CHIP_RS780)
242 0 : WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
243 : ~UPLL_REFCLK_SRC_SEL_MASK);
244 :
245 : /* set the required fb, ref and post divider values */
246 0 : WREG32_P(CG_UPLL_FUNC_CNTL,
247 : UPLL_FB_DIV(fb_div) |
248 : UPLL_REF_DIV(ref_div),
249 : ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
250 0 : WREG32_P(CG_UPLL_FUNC_CNTL_2,
251 : UPLL_SW_HILEN(vclk_div >> 1) |
252 : UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
253 : UPLL_SW_HILEN2(dclk_div >> 1) |
254 : UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
255 : UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
256 : ~UPLL_SW_MASK);
257 :
258 : /* give the PLL some time to settle */
259 0 : mdelay(15);
260 :
261 : /* deassert PLL_RESET */
262 0 : WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
263 :
264 0 : mdelay(15);
265 :
266 : /* deassert BYPASS EN */
267 0 : WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
268 :
269 0 : if (rdev->family >= CHIP_RS780)
270 0 : WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
271 :
272 0 : r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
273 0 : if (r)
274 0 : return r;
275 :
276 : /* switch VCLK and DCLK selection */
277 0 : WREG32_P(CG_UPLL_FUNC_CNTL_2,
278 : VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
279 : ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
280 :
281 0 : mdelay(100);
282 :
283 0 : return 0;
284 0 : }
285 :
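 : /**
 : * dce3_program_fmt - set up the FMT block for the monitor bit depth
 : *
 : * @encoder: drm encoder
 : *
 : * Programs FMT_BIT_DEPTH_CONTROL for the crtc feeding this encoder:
 : * truncation or spatial dithering for 6/8 bpc monitors, nothing for
 : * 10 bpc. LVDS (set up by atom) and the analog DACs are skipped.
 : */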
286 0 : void dce3_program_fmt(struct drm_encoder *encoder)
287 : {
288 0 : struct drm_device *dev = encoder->dev;
289 0 : struct radeon_device *rdev = dev->dev_private;
290 0 : struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
291 0 : struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
292 0 : struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
293 : int bpc = 0;
294 : u32 tmp = 0;
295 : enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
296 :
297 0 : if (connector) {
298 0 : struct radeon_connector *radeon_connector = to_radeon_connector(connector);
299 0 : bpc = radeon_get_monitor_bpc(connector);
300 0 : dither = radeon_connector->dither;
301 0 : }
302 :
303 : /* LVDS FMT is set up by atom */
304 0 : if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
305 0 : return;
306 :
307 : /* not needed for analog */
308 0 : if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
309 0 : (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
310 0 : return;
311 :
312 0 : if (bpc == 0)
313 0 : return;
314 :
315 0 : switch (bpc) {
316 : case 6:
317 0 : if (dither == RADEON_FMT_DITHER_ENABLE)
318 : /* XXX sort out optimal dither settings */
319 0 : tmp |= FMT_SPATIAL_DITHER_EN;
320 : else
321 : tmp |= FMT_TRUNCATE_EN;
322 : break;
323 : case 8:
324 0 : if (dither == RADEON_FMT_DITHER_ENABLE)
325 : /* XXX sort out optimal dither settings */
326 0 : tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
327 : else
328 : tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
329 : break;
330 : case 10:
331 : default:
332 : /* not needed */
333 : break;
334 : }
335 :
336 0 : WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
337 0 : }
338 :
339 : /* get temperature in millidegrees */
340 0 : int rv6xx_get_temp(struct radeon_device *rdev)
341 : {
342 0 : u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
343 : ASIC_T_SHIFT;
344 0 : int actual_temp = temp & 0xff;
345 :
346 0 : if (temp & 0x100)
347 0 : actual_temp -= 256;
348 :
349 0 : return actual_temp * 1000;
350 : }
351 :
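 : /**
 : * r600_pm_get_dynpm_state - select the requested dynpm power state
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Translates the planned dynpm action (minimum, downclock, upclock,
 : * default) into requested_power_state_index and
 : * requested_clock_mode_index, skipping single-display-only states when
 : * several crtcs are active and updating the can_upclock/can_downclock
 : * flags.
 : */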
352 0 : void r600_pm_get_dynpm_state(struct radeon_device *rdev)
353 : {
354 : int i;
355 :
356 0 : rdev->pm.dynpm_can_upclock = true;
357 0 : rdev->pm.dynpm_can_downclock = true;
358 :
359 : /* power state array is low to high, default is first */
360 0 : if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
361 : int min_power_state_index = 0;
362 :
363 0 : if (rdev->pm.num_power_states > 2)
364 : min_power_state_index = 1;
365 :
366 0 : switch (rdev->pm.dynpm_planned_action) {
367 : case DYNPM_ACTION_MINIMUM:
368 0 : rdev->pm.requested_power_state_index = min_power_state_index;
369 0 : rdev->pm.requested_clock_mode_index = 0;
370 0 : rdev->pm.dynpm_can_downclock = false;
371 0 : break;
372 : case DYNPM_ACTION_DOWNCLOCK:
373 0 : if (rdev->pm.current_power_state_index == min_power_state_index) {
374 0 : rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
375 0 : rdev->pm.dynpm_can_downclock = false;
376 0 : } else {
377 0 : if (rdev->pm.active_crtc_count > 1) {
378 0 : for (i = 0; i < rdev->pm.num_power_states; i++) {
379 0 : if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
380 : continue;
381 0 : else if (i >= rdev->pm.current_power_state_index) {
382 0 : rdev->pm.requested_power_state_index =
383 : rdev->pm.current_power_state_index;
384 0 : break;
385 : } else {
386 0 : rdev->pm.requested_power_state_index = i;
387 0 : break;
388 : }
389 : }
390 : } else {
391 0 : if (rdev->pm.current_power_state_index == 0)
392 0 : rdev->pm.requested_power_state_index =
393 0 : rdev->pm.num_power_states - 1;
394 : else
395 0 : rdev->pm.requested_power_state_index =
396 0 : rdev->pm.current_power_state_index - 1;
397 : }
398 : }
399 0 : rdev->pm.requested_clock_mode_index = 0;
400 : /* don't use this state if crtcs are active and it has the no-display flag set */
401 0 : if ((rdev->pm.active_crtc_count > 0) &&
402 0 : (rdev->pm.power_state[rdev->pm.requested_power_state_index].
403 0 : clock_info[rdev->pm.requested_clock_mode_index].flags &
404 : RADEON_PM_MODE_NO_DISPLAY)) {
405 0 : rdev->pm.requested_power_state_index++;
406 0 : }
407 : break;
408 : case DYNPM_ACTION_UPCLOCK:
409 0 : if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
410 0 : rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
411 0 : rdev->pm.dynpm_can_upclock = false;
412 0 : } else {
413 0 : if (rdev->pm.active_crtc_count > 1) {
414 0 : for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
415 0 : if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
416 : continue;
417 0 : else if (i <= rdev->pm.current_power_state_index) {
418 0 : rdev->pm.requested_power_state_index =
419 : rdev->pm.current_power_state_index;
420 0 : break;
421 : } else {
422 0 : rdev->pm.requested_power_state_index = i;
423 0 : break;
424 : }
425 : }
426 : } else
427 0 : rdev->pm.requested_power_state_index =
428 0 : rdev->pm.current_power_state_index + 1;
429 : }
430 0 : rdev->pm.requested_clock_mode_index = 0;
431 0 : break;
432 : case DYNPM_ACTION_DEFAULT:
433 0 : rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
434 0 : rdev->pm.requested_clock_mode_index = 0;
435 0 : rdev->pm.dynpm_can_upclock = false;
436 0 : break;
437 : case DYNPM_ACTION_NONE:
438 : default:
439 0 : DRM_ERROR("Requested mode for undefined action\n");
440 0 : return;
441 : }
442 0 : } else {
443 : /* XXX select a power state based on AC/DC, single/dualhead, etc. */
444 : /* for now just select the first power state and switch between clock modes */
445 : /* power state array is low to high, default is first (0) */
446 0 : if (rdev->pm.active_crtc_count > 1) {
447 0 : rdev->pm.requested_power_state_index = -1;
448 : /* start at 1 as we don't want the default mode */
449 0 : for (i = 1; i < rdev->pm.num_power_states; i++) {
450 0 : if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
451 : continue;
452 0 : else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
453 0 : (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
454 0 : rdev->pm.requested_power_state_index = i;
455 0 : break;
456 : }
457 : }
458 : /* if nothing selected, grab the default state. */
459 0 : if (rdev->pm.requested_power_state_index == -1)
460 0 : rdev->pm.requested_power_state_index = 0;
461 : } else
462 0 : rdev->pm.requested_power_state_index = 1;
463 :
464 0 : switch (rdev->pm.dynpm_planned_action) {
465 : case DYNPM_ACTION_MINIMUM:
466 0 : rdev->pm.requested_clock_mode_index = 0;
467 0 : rdev->pm.dynpm_can_downclock = false;
468 0 : break;
469 : case DYNPM_ACTION_DOWNCLOCK:
470 0 : if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
471 0 : if (rdev->pm.current_clock_mode_index == 0) {
472 0 : rdev->pm.requested_clock_mode_index = 0;
473 0 : rdev->pm.dynpm_can_downclock = false;
474 0 : } else
475 0 : rdev->pm.requested_clock_mode_index =
476 0 : rdev->pm.current_clock_mode_index - 1;
477 : } else {
478 0 : rdev->pm.requested_clock_mode_index = 0;
479 0 : rdev->pm.dynpm_can_downclock = false;
480 : }
481 : /* don't use this state if crtcs are active and it has the no-display flag set */
482 0 : if ((rdev->pm.active_crtc_count > 0) &&
483 0 : (rdev->pm.power_state[rdev->pm.requested_power_state_index].
484 0 : clock_info[rdev->pm.requested_clock_mode_index].flags &
485 : RADEON_PM_MODE_NO_DISPLAY)) {
486 0 : rdev->pm.requested_clock_mode_index++;
487 0 : }
488 : break;
489 : case DYNPM_ACTION_UPCLOCK:
490 0 : if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
491 0 : if (rdev->pm.current_clock_mode_index ==
492 0 : (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
493 0 : rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
494 0 : rdev->pm.dynpm_can_upclock = false;
495 0 : } else
496 0 : rdev->pm.requested_clock_mode_index =
497 0 : rdev->pm.current_clock_mode_index + 1;
498 : } else {
499 0 : rdev->pm.requested_clock_mode_index =
500 0 : rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
501 0 : rdev->pm.dynpm_can_upclock = false;
502 : }
503 : break;
504 : case DYNPM_ACTION_DEFAULT:
505 0 : rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
506 0 : rdev->pm.requested_clock_mode_index = 0;
507 0 : rdev->pm.dynpm_can_upclock = false;
508 0 : break;
509 : case DYNPM_ACTION_NONE:
510 : default:
511 0 : DRM_ERROR("Requested mode for undefined action\n");
512 0 : return;
513 : }
514 : }
515 :
516 : DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
517 : rdev->pm.power_state[rdev->pm.requested_power_state_index].
518 : clock_info[rdev->pm.requested_clock_mode_index].sclk,
519 : rdev->pm.power_state[rdev->pm.requested_power_state_index].
520 : clock_info[rdev->pm.requested_clock_mode_index].mclk,
521 : rdev->pm.power_state[rdev->pm.requested_power_state_index].
522 : pcie_lanes);
523 0 : }
524 :
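 : /**
 : * rs780_pm_init_profile - build the PM profile table (RS780/RS880)
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Fills the default/low/mid/high, single- and multi-head profile
 : * entries with power state indices chosen by how many power states
 : * the board exposes (2, 3, or more).
 : */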
525 0 : void rs780_pm_init_profile(struct radeon_device *rdev)
526 : {
527 0 : if (rdev->pm.num_power_states == 2) {
528 : /* default */
529 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
530 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
531 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
532 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
533 : /* low sh */
534 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
535 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
536 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
537 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
538 : /* mid sh */
539 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
540 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
541 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
542 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
543 : /* high sh */
544 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
545 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
546 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
547 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
548 : /* low mh */
549 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
550 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
551 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
552 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
553 : /* mid mh */
554 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
555 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
556 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
557 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
558 : /* high mh */
559 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
560 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
561 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
562 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
563 0 : } else if (rdev->pm.num_power_states == 3) {
564 : /* default */
565 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
566 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
567 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
568 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
569 : /* low sh */
570 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
571 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
572 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
573 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
574 : /* mid sh */
575 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
576 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
577 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
578 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
579 : /* high sh */
580 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
581 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
582 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
583 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
584 : /* low mh */
585 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
586 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
587 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
588 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
589 : /* mid mh */
590 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
591 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
592 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
593 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
594 : /* high mh */
595 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
596 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
597 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
598 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
599 0 : } else {
600 : /* default */
601 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
602 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
603 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
604 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
605 : /* low sh */
606 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
607 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
608 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
609 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
610 : /* mid sh */
611 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
612 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
613 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
614 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
615 : /* high sh */
616 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
617 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
618 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
619 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
620 : /* low mh */
621 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
622 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
623 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
624 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
625 : /* mid mh */
626 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
627 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
628 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
629 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
630 : /* high mh */
631 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
632 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
633 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
634 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
635 : }
636 0 : }
637 :
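 : /**
 : * r600_pm_init_profile - build the PM profile table (r6xx/r7xx)
 : *
 : * @rdev: radeon_device pointer
 : *
 : * On R600 every profile maps to the default power state; on later
 : * chips the entries are picked by power state count or, with four or
 : * more states, looked up by battery/performance state type.
 : */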
638 0 : void r600_pm_init_profile(struct radeon_device *rdev)
639 : {
640 : int idx;
641 :
642 0 : if (rdev->family == CHIP_R600) {
643 : /* XXX */
644 : /* default */
645 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
646 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
647 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
648 0 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
649 : /* low sh */
650 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
651 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
652 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
653 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
654 : /* mid sh */
655 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
656 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
657 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
658 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
659 : /* high sh */
660 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
661 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
662 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
663 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
664 : /* low mh */
665 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
666 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
667 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
668 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
669 : /* mid mh */
670 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
671 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
672 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
673 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
674 : /* high mh */
675 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
676 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
677 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
678 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
679 0 : } else {
680 0 : if (rdev->pm.num_power_states < 4) {
681 : /* default */
682 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
683 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
684 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
685 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
686 : /* low sh */
687 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
688 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
689 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
690 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
691 : /* mid sh */
692 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
693 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
694 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
695 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
696 : /* high sh */
697 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
698 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
699 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
700 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
701 : /* low mh */
702 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
703 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
704 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
705 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
706 : /* mid mh */
707 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
708 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
709 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
710 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
711 : /* high mh */
712 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
713 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
714 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
715 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
716 0 : } else {
717 : /* default */
718 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
719 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
720 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
721 : rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
722 : /* low sh */
723 0 : if (rdev->flags & RADEON_IS_MOBILITY)
724 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
725 : else
726 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
727 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
728 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
729 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
730 0 : rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
731 : /* mid sh */
732 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
733 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
734 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
735 0 : rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
736 : /* high sh */
737 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
738 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
739 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
740 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
741 0 : rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
742 : /* low mh */
743 0 : if (rdev->flags & RADEON_IS_MOBILITY)
744 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
745 : else
746 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
747 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
748 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
749 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
750 0 : rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
751 : /* mid mh */
752 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
753 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
754 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
755 0 : rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
756 : /* high mh */
757 0 : idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
758 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
759 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
760 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
761 0 : rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
762 : }
763 : }
764 0 : }
765 :
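 : /**
 : * r600_pm_misc - apply non-clock settings for the requested state
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Currently only programs the VDDC voltage through the atom tables
 : * when the requested software voltage differs from the current one.
 : */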
766 0 : void r600_pm_misc(struct radeon_device *rdev)
767 : {
768 0 : int req_ps_idx = rdev->pm.requested_power_state_index;
769 0 : int req_cm_idx = rdev->pm.requested_clock_mode_index;
770 0 : struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
771 0 : struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
772 :
773 0 : if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
774 : /* 0xff01 is a flag rather than an actual voltage */
775 0 : if (voltage->voltage == 0xff01)
776 0 : return;
777 0 : if (voltage->voltage != rdev->pm.current_vddc) {
778 0 : radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
779 0 : rdev->pm.current_vddc = voltage->voltage;
780 : DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
781 0 : }
782 : }
783 0 : }
784 :
785 0 : bool r600_gui_idle(struct radeon_device *rdev)
786 : {
787 0 : if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
788 0 : return false;
789 : else
790 0 : return true;
791 0 : }
792 :
793 : /* hpd for digital panel detect/disconnect */
794 0 : bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
795 : {
796 : bool connected = false;
797 :
798 0 : if (ASIC_IS_DCE3(rdev)) {
799 0 : switch (hpd) {
800 : case RADEON_HPD_1:
801 0 : if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
802 0 : connected = true;
803 : break;
804 : case RADEON_HPD_2:
805 0 : if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
806 0 : connected = true;
807 : break;
808 : case RADEON_HPD_3:
809 0 : if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
810 0 : connected = true;
811 : break;
812 : case RADEON_HPD_4:
813 0 : if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
814 0 : connected = true;
815 : break;
816 : /* DCE 3.2 */
817 : case RADEON_HPD_5:
818 0 : if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
819 0 : connected = true;
820 : break;
821 : case RADEON_HPD_6:
822 0 : if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
823 0 : connected = true;
824 : break;
825 : default:
826 : break;
827 : }
828 : } else {
829 0 : switch (hpd) {
830 : case RADEON_HPD_1:
831 0 : if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
832 0 : connected = true;
833 : break;
834 : case RADEON_HPD_2:
835 0 : if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
836 0 : connected = true;
837 : break;
838 : case RADEON_HPD_3:
839 0 : if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
840 0 : connected = true;
841 : break;
842 : default:
843 : break;
844 : }
845 : }
846 0 : return connected;
847 : }
848 :
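 : /**
 : * r600_hpd_set_polarity - program the HPD interrupt polarity
 : *
 : * @rdev: radeon_device pointer
 : * @hpd: hpd pin to configure
 : *
 : * Senses the current connection state and inverts the interrupt
 : * polarity accordingly, so the next plug or unplug raises an interrupt.
 : */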
849 0 : void r600_hpd_set_polarity(struct radeon_device *rdev,
850 : enum radeon_hpd_id hpd)
851 : {
852 : u32 tmp;
853 0 : bool connected = r600_hpd_sense(rdev, hpd);
854 :
855 0 : if (ASIC_IS_DCE3(rdev)) {
856 0 : switch (hpd) {
857 : case RADEON_HPD_1:
858 0 : tmp = RREG32(DC_HPD1_INT_CONTROL);
859 0 : if (connected)
860 0 : tmp &= ~DC_HPDx_INT_POLARITY;
861 : else
862 0 : tmp |= DC_HPDx_INT_POLARITY;
863 0 : WREG32(DC_HPD1_INT_CONTROL, tmp);
864 0 : break;
865 : case RADEON_HPD_2:
866 0 : tmp = RREG32(DC_HPD2_INT_CONTROL);
867 0 : if (connected)
868 0 : tmp &= ~DC_HPDx_INT_POLARITY;
869 : else
870 0 : tmp |= DC_HPDx_INT_POLARITY;
871 0 : WREG32(DC_HPD2_INT_CONTROL, tmp);
872 0 : break;
873 : case RADEON_HPD_3:
874 0 : tmp = RREG32(DC_HPD3_INT_CONTROL);
875 0 : if (connected)
876 0 : tmp &= ~DC_HPDx_INT_POLARITY;
877 : else
878 0 : tmp |= DC_HPDx_INT_POLARITY;
879 0 : WREG32(DC_HPD3_INT_CONTROL, tmp);
880 0 : break;
881 : case RADEON_HPD_4:
882 0 : tmp = RREG32(DC_HPD4_INT_CONTROL);
883 0 : if (connected)
884 0 : tmp &= ~DC_HPDx_INT_POLARITY;
885 : else
886 0 : tmp |= DC_HPDx_INT_POLARITY;
887 0 : WREG32(DC_HPD4_INT_CONTROL, tmp);
888 0 : break;
889 : case RADEON_HPD_5:
890 0 : tmp = RREG32(DC_HPD5_INT_CONTROL);
891 0 : if (connected)
892 0 : tmp &= ~DC_HPDx_INT_POLARITY;
893 : else
894 0 : tmp |= DC_HPDx_INT_POLARITY;
895 0 : WREG32(DC_HPD5_INT_CONTROL, tmp);
896 0 : break;
897 : /* DCE 3.2 */
898 : case RADEON_HPD_6:
899 0 : tmp = RREG32(DC_HPD6_INT_CONTROL);
900 0 : if (connected)
901 0 : tmp &= ~DC_HPDx_INT_POLARITY;
902 : else
903 0 : tmp |= DC_HPDx_INT_POLARITY;
904 0 : WREG32(DC_HPD6_INT_CONTROL, tmp);
905 0 : break;
906 : default:
907 : break;
908 : }
909 : } else {
910 0 : switch (hpd) {
911 : case RADEON_HPD_1:
912 0 : tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
913 0 : if (connected)
914 0 : tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
915 : else
916 0 : tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
917 0 : WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
918 0 : break;
919 : case RADEON_HPD_2:
920 0 : tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
921 0 : if (connected)
922 0 : tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
923 : else
924 0 : tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
925 0 : WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
926 0 : break;
927 : case RADEON_HPD_3:
928 0 : tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
929 0 : if (connected)
930 0 : tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
931 : else
932 0 : tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
933 0 : WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
934 0 : break;
935 : default:
936 : break;
937 : }
938 : }
939 0 : }
940 :
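 : /**
 : * r600_hpd_init - enable hotplug detection
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Enables the HPD pin of every connector (except eDP/LVDS, see below),
 : * sets the pin polarity and enables the HPD interrupts.
 : */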
941 0 : void r600_hpd_init(struct radeon_device *rdev)
942 : {
943 0 : struct drm_device *dev = rdev->ddev;
944 : struct drm_connector *connector;
945 : unsigned enable = 0;
946 :
947 0 : list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
948 0 : struct radeon_connector *radeon_connector = to_radeon_connector(connector);
949 :
950 0 : if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
951 0 : connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
952 : /* don't try to enable hpd on eDP or LVDS; this avoids breaking the
953 : * aux dp channel on iMacs and helps (but does not completely fix)
954 : * https://bugzilla.redhat.com/show_bug.cgi?id=726143
955 : */
956 0 : continue;
957 : }
958 0 : if (ASIC_IS_DCE3(rdev)) {
959 : u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
960 0 : if (ASIC_IS_DCE32(rdev))
961 0 : tmp |= DC_HPDx_EN;
962 :
963 0 : switch (radeon_connector->hpd.hpd) {
964 : case RADEON_HPD_1:
965 0 : WREG32(DC_HPD1_CONTROL, tmp);
966 0 : break;
967 : case RADEON_HPD_2:
968 0 : WREG32(DC_HPD2_CONTROL, tmp);
969 0 : break;
970 : case RADEON_HPD_3:
971 0 : WREG32(DC_HPD3_CONTROL, tmp);
972 0 : break;
973 : case RADEON_HPD_4:
974 0 : WREG32(DC_HPD4_CONTROL, tmp);
975 0 : break;
976 : /* DCE 3.2 */
977 : case RADEON_HPD_5:
978 0 : WREG32(DC_HPD5_CONTROL, tmp);
979 0 : break;
980 : case RADEON_HPD_6:
981 0 : WREG32(DC_HPD6_CONTROL, tmp);
982 0 : break;
983 : default:
984 : break;
985 : }
986 0 : } else {
987 0 : switch (radeon_connector->hpd.hpd) {
988 : case RADEON_HPD_1:
989 0 : WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
990 0 : break;
991 : case RADEON_HPD_2:
992 0 : WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
993 0 : break;
994 : case RADEON_HPD_3:
995 0 : WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
996 0 : break;
997 : default:
998 : break;
999 : }
1000 : }
1001 0 : enable |= 1 << radeon_connector->hpd.hpd;
1002 0 : radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
1003 0 : }
1004 0 : radeon_irq_kms_enable_hpd(rdev, enable);
1005 0 : }
1006 :
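 : /**
 : * r600_hpd_fini - disable hotplug detection
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Disables the HPD pin of every connector and the HPD interrupts.
 : */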
1007 0 : void r600_hpd_fini(struct radeon_device *rdev)
1008 : {
1009 0 : struct drm_device *dev = rdev->ddev;
1010 : struct drm_connector *connector;
1011 : unsigned disable = 0;
1012 :
1013 0 : list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1014 0 : struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1015 0 : if (ASIC_IS_DCE3(rdev)) {
1016 0 : switch (radeon_connector->hpd.hpd) {
1017 : case RADEON_HPD_1:
1018 0 : WREG32(DC_HPD1_CONTROL, 0);
1019 0 : break;
1020 : case RADEON_HPD_2:
1021 0 : WREG32(DC_HPD2_CONTROL, 0);
1022 0 : break;
1023 : case RADEON_HPD_3:
1024 0 : WREG32(DC_HPD3_CONTROL, 0);
1025 0 : break;
1026 : case RADEON_HPD_4:
1027 0 : WREG32(DC_HPD4_CONTROL, 0);
1028 0 : break;
1029 : /* DCE 3.2 */
1030 : case RADEON_HPD_5:
1031 0 : WREG32(DC_HPD5_CONTROL, 0);
1032 0 : break;
1033 : case RADEON_HPD_6:
1034 0 : WREG32(DC_HPD6_CONTROL, 0);
1035 0 : break;
1036 : default:
1037 : break;
1038 : }
1039 : } else {
1040 0 : switch (radeon_connector->hpd.hpd) {
1041 : case RADEON_HPD_1:
1042 0 : WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
1043 0 : break;
1044 : case RADEON_HPD_2:
1045 0 : WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
1046 0 : break;
1047 : case RADEON_HPD_3:
1048 0 : WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
1049 0 : break;
1050 : default:
1051 : break;
1052 : }
1053 : }
1054 0 : disable |= 1 << radeon_connector->hpd.hpd;
1055 : }
1056 0 : radeon_irq_kms_disable_hpd(rdev, disable);
1057 0 : }
1058 :
1059 : /*
1060 : * R600 PCIE GART
1061 : */
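 : /**
 : * r600_pcie_gart_tlb_flush - flush the GART TLB
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Flushes the HDP cache so page table updates reach VRAM, then
 : * requests a VM context0 invalidation over the GTT range and polls
 : * the request response until the invalidation completes.
 : */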
1062 0 : void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
1063 : {
1064 : unsigned i;
1065 : u32 tmp;
1066 :
1067 : /* flush hdp cache so updates hit vram */
1068 0 : if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
1069 0 : !(rdev->flags & RADEON_IS_AGP)) {
1070 0 : void __iomem *ptr = (void *)rdev->gart.ptr;
1071 : u32 tmp;
1072 :
1073 : /* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read,
1074 : * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
1075 : * The new method seems to cause problems on some AGP cards, so
1076 : * just use the old method for them.
1077 : */
1078 0 : WREG32(HDP_DEBUG1, 0);
1079 0 : tmp = readl((void __iomem *)ptr);
1080 0 : } else
1081 0 : WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1082 :
1083 0 : WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
1084 0 : WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
1085 0 : WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
1086 0 : for (i = 0; i < rdev->usec_timeout; i++) {
1087 : /* read the VM context0 request response */
1088 0 : tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
1089 0 : tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
1090 0 : if (tmp == 2) {
1091 0 : printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
1092 0 : return;
1093 : }
1094 0 : if (tmp) {
1095 0 : return;
1096 : }
1097 0 : udelay(1);
1098 : }
1099 0 : }
1100 :
1101 0 : int r600_pcie_gart_init(struct radeon_device *rdev)
1102 : {
1103 : int r;
1104 :
1105 0 : if (rdev->gart.robj) {
1106 0 : WARN(1, "R600 PCIE GART already initialized\n");
1107 0 : return 0;
1108 : }
1109 : /* Initialize common gart structure */
1110 0 : r = radeon_gart_init(rdev);
1111 0 : if (r)
1112 0 : return r;
1113 0 : rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
1114 0 : return radeon_gart_table_vram_alloc(rdev);
1115 0 : }
1116 :
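 : /**
 : * r600_pcie_gart_enable - set up and enable the PCIE GART
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Pins the page table in VRAM, programs the L2 cache and L1 TLB
 : * client registers, points VM context0 at the page table and flushes
 : * the TLB. Returns 0 on success, a negative error code on failure.
 : */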
1117 0 : static int r600_pcie_gart_enable(struct radeon_device *rdev)
1118 : {
1119 : u32 tmp;
1120 : int r, i;
1121 :
1122 0 : if (rdev->gart.robj == NULL) {
1123 0 : dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1124 0 : return -EINVAL;
1125 : }
1126 0 : r = radeon_gart_table_vram_pin(rdev);
1127 0 : if (r)
1128 0 : return r;
1129 :
1130 : /* Setup L2 cache */
1131 0 : WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1132 : ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1133 : EFFECTIVE_L2_QUEUE_SIZE(7));
1134 0 : WREG32(VM_L2_CNTL2, 0);
1135 0 : WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1136 : /* Setup TLB control */
1137 : tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1138 : SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1139 : EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1140 : ENABLE_WAIT_L2_QUERY;
1141 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1142 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1143 0 : WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1144 0 : WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1145 0 : WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1146 0 : WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1147 0 : WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1148 0 : WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1149 0 : WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1150 0 : WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1151 0 : WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1152 0 : WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1153 0 : WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1154 0 : WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
1155 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1156 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1157 0 : WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1158 0 : WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1159 0 : WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1160 0 : WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1161 : RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1162 0 : WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1163 : (u32)(rdev->dummy_page.addr >> 12));
1164 0 : for (i = 1; i < 7; i++)
1165 0 : WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1166 :
1167 0 : r600_pcie_gart_tlb_flush(rdev);
1168 : DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1169 : (unsigned)(rdev->mc.gtt_size >> 20),
1170 : (unsigned long long)rdev->gart.table_addr);
1171 0 : rdev->gart.ready = true;
1172 0 : return 0;
1173 0 : }
1174 :
1175 0 : static void r600_pcie_gart_disable(struct radeon_device *rdev)
1176 : {
1177 : u32 tmp;
1178 : int i;
1179 :
1180 : /* Disable all tables */
1181 0 : for (i = 0; i < 7; i++)
1182 0 : WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1183 :
1184 : /* Disable L2 cache */
1185 0 : WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1186 : EFFECTIVE_L2_QUEUE_SIZE(7));
1187 0 : WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1188 : /* Setup L1 TLB control */
1189 : tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1190 : ENABLE_WAIT_L2_QUERY;
1191 0 : WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1192 0 : WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1193 0 : WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1194 0 : WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1195 0 : WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1196 0 : WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1197 0 : WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1198 0 : WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1199 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1200 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1201 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1202 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1203 0 : WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1204 0 : WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1205 0 : WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
1206 0 : WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
1207 0 : radeon_gart_table_vram_unpin(rdev);
1208 0 : }
1209 :
1210 0 : static void r600_pcie_gart_fini(struct radeon_device *rdev)
1211 : {
1212 0 : radeon_gart_fini(rdev);
1213 0 : r600_pcie_gart_disable(rdev);
1214 0 : radeon_gart_table_vram_free(rdev);
1215 0 : }
1216 :
1217 0 : static void r600_agp_enable(struct radeon_device *rdev)
1218 : {
1219 : u32 tmp;
1220 : int i;
1221 :
1222 : /* Setup L2 cache */
1223 0 : WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1224 : ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1225 : EFFECTIVE_L2_QUEUE_SIZE(7));
1226 0 : WREG32(VM_L2_CNTL2, 0);
1227 0 : WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1228 : /* Setup TLB control */
1229 : tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1230 : SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1231 : EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1232 : ENABLE_WAIT_L2_QUERY;
1233 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1234 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1235 0 : WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1236 0 : WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1237 0 : WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1238 0 : WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1239 0 : WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1240 0 : WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1241 0 : WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1242 0 : WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1243 0 : WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1244 0 : WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1245 0 : WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1246 0 : WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1247 0 : for (i = 0; i < 7; i++)
1248 0 : WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1249 0 : }
1250 :
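 : /**
 : * r600_mc_wait_for_idle - wait for the memory controller to go idle
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Polls the MC busy bits in SRBM_STATUS for up to usec_timeout
 : * microseconds. Returns 0 once idle, -1 on timeout.
 : */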
1251 0 : int r600_mc_wait_for_idle(struct radeon_device *rdev)
1252 : {
1253 : unsigned i;
1254 : u32 tmp;
1255 :
1256 0 : for (i = 0; i < rdev->usec_timeout; i++) {
1257 : /* read the MC busy bits of SRBM_STATUS */
1258 0 : tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1259 0 : if (!tmp)
1260 0 : return 0;
1261 0 : udelay(1);
1262 : }
1263 0 : return -1;
1264 0 : }
1265 :
1266 0 : uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1267 : {
1268 : unsigned long flags;
1269 : uint32_t r;
1270 :
1271 0 : spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1272 0 : WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1273 0 : r = RREG32(R_0028FC_MC_DATA);
1274 0 : WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1275 0 : spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1276 0 : return r;
1277 : }
1278 :
1279 0 : void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1280 : {
1281 : unsigned long flags;
1282 :
1283 0 : spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1284 0 : WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1285 : S_0028F8_MC_IND_WR_EN(1));
1286 0 : WREG32(R_0028FC_MC_DATA, v);
1287 0 : WREG32(R_0028F8_MC_INDEX, 0x7F);
1288 0 : spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1289 0 : }
1290 :
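 : /**
 : * r600_mc_program - program the memory controller address map
 : *
 : * @rdev: radeon_device pointer
 : *
 : * Stops the MC, blocks the VGA aperture, programs the system aperture,
 : * FB location and AGP registers for the chosen VRAM/GTT layout, then
 : * resumes the MC and disables the VGA renderer.
 : */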
1291 0 : static void r600_mc_program(struct radeon_device *rdev)
1292 : {
1293 0 : struct rv515_mc_save save;
1294 : u32 tmp;
1295 : int i, j;
1296 :
1297 : /* Initialize HDP */
1298 0 : for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1299 0 : WREG32((0x2c14 + j), 0x00000000);
1300 0 : WREG32((0x2c18 + j), 0x00000000);
1301 0 : WREG32((0x2c1c + j), 0x00000000);
1302 0 : WREG32((0x2c20 + j), 0x00000000);
1303 0 : WREG32((0x2c24 + j), 0x00000000);
1304 : }
1305 0 : WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1306 :
1307 0 : rv515_mc_stop(rdev, &save);
1308 0 : if (r600_mc_wait_for_idle(rdev)) {
1309 0 : dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1310 0 : }
1311 : /* Lock out access through the VGA aperture (doesn't exist before R600) */
1312 0 : WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1313 : /* Update configuration */
1314 0 : if (rdev->flags & RADEON_IS_AGP) {
1315 0 : if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1316 : /* VRAM before AGP */
1317 0 : WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1318 : rdev->mc.vram_start >> 12);
1319 0 : WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1320 : rdev->mc.gtt_end >> 12);
1321 0 : } else {
1322 : /* VRAM after AGP */
1323 0 : WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1324 : rdev->mc.gtt_start >> 12);
1325 0 : WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1326 : rdev->mc.vram_end >> 12);
1327 : }
1328 : } else {
1329 0 : WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1330 0 : WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1331 : }
1332 0 : WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1333 0 : tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1334 0 : tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1335 0 : WREG32(MC_VM_FB_LOCATION, tmp);
1336 0 : WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1337 0 : WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1338 0 : WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1339 0 : if (rdev->flags & RADEON_IS_AGP) {
1340 0 : WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1341 0 : WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1342 0 : WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1343 0 : } else {
1344 0 : WREG32(MC_VM_AGP_BASE, 0);
1345 0 : WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1346 0 : WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1347 : }
1348 0 : if (r600_mc_wait_for_idle(rdev)) {
1349 0 : dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1350 0 : }
1351 0 : rv515_mc_resume(rdev, &save);
1352 : /* we need to own VRAM, so turn off the VGA renderer here
1353 : * to stop it overwriting our objects */
1354 0 : rv515_vga_render_disable(rdev);
1355 0 : }
1356 :
1357 : /**
1358 : * r600_vram_gtt_location - try to find VRAM & GTT location
1359 : * @rdev: radeon device structure holding all necessary information
1360 : * @mc: memory controller structure holding memory information
1361 : *
1362 : * Try to place VRAM at the same address in the GPU address space as it
1363 : * has in the CPU (PCI) address space, as some GPUs seem to have issues
1364 : * when it is reprogrammed to a different address.
1365 : *
1366 : * If there is not enough space to fit the non-visible VRAM after the
1367 : * aperture, the VRAM size is limited to the aperture.
1368 : *
1369 : * If we are using AGP, place VRAM adjacent to the AGP aperture, as the
1370 : * GPU needs them to be contiguous from its point of view so that it can
1371 : * be programmed to catch accesses outside both (weird GPU policy, see ??).
1372 : *
1373 : * This function never fails; in the worst case VRAM or GTT is limited.
1374 : *
1375 : * Note: GTT start, end, size should be initialized before calling this
1376 : * function on AGP platform.
1377 : */
1378 0 : static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1379 : {
1380 : u64 size_bf, size_af;
1381 :
1382 0 : if (mc->mc_vram_size > 0xE0000000) {
1383 : /* leave room for at least 512M GTT */
1384 0 : dev_warn(rdev->dev, "limiting VRAM\n");
1385 0 : mc->real_vram_size = 0xE0000000;
1386 0 : mc->mc_vram_size = 0xE0000000;
1387 0 : }
1388 0 : if (rdev->flags & RADEON_IS_AGP) {
1389 0 : size_bf = mc->gtt_start;
1390 0 : size_af = mc->mc_mask - mc->gtt_end;
1391 0 : if (size_bf > size_af) {
1392 0 : if (mc->mc_vram_size > size_bf) {
1393 0 : dev_warn(rdev->dev, "limiting VRAM\n");
1394 0 : mc->real_vram_size = size_bf;
1395 0 : mc->mc_vram_size = size_bf;
1396 0 : }
1397 0 : mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1398 0 : } else {
1399 0 : if (mc->mc_vram_size > size_af) {
1400 0 : dev_warn(rdev->dev, "limiting VRAM\n");
1401 0 : mc->real_vram_size = size_af;
1402 0 : mc->mc_vram_size = size_af;
1403 0 : }
1404 0 : mc->vram_start = mc->gtt_end + 1;
1405 : }
1406 0 : mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1407 : dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1408 : mc->mc_vram_size >> 20, mc->vram_start,
1409 : mc->vram_end, mc->real_vram_size >> 20);
1410 0 : } else {
1411 : u64 base = 0;
1412 0 : if (rdev->flags & RADEON_IS_IGP) {
1413 0 : base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1414 0 : base <<= 24;
1415 0 : }
1416 0 : radeon_vram_location(rdev, &rdev->mc, base);
1417 0 : rdev->mc.gtt_base_align = 0;
1418 0 : radeon_gtt_location(rdev, mc);
1419 : }
1420 0 : }
1421 :
1422 0 : static int r600_mc_init(struct radeon_device *rdev)
1423 : {
1424 : u32 tmp;
1425 : int chansize, numchan;
1426 : uint32_t h_addr, l_addr;
1427 : unsigned long long k8_addr;
1428 :
1429 : /* Get VRAM information */
1430 0 : rdev->mc.vram_is_ddr = true;
1431 0 : tmp = RREG32(RAMCFG);
1432 0 : if (tmp & CHANSIZE_OVERRIDE) {
1433 : chansize = 16;
1434 0 : } else if (tmp & CHANSIZE_MASK) {
1435 : chansize = 64;
1436 0 : } else {
1437 : chansize = 32;
1438 : }
1439 0 : tmp = RREG32(CHMAP);
1440 0 : switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1441 : case 0:
1442 : default:
1443 : numchan = 1;
1444 0 : break;
1445 : case 1:
1446 : numchan = 2;
1447 0 : break;
1448 : case 2:
1449 : numchan = 4;
1450 0 : break;
1451 : case 3:
1452 : numchan = 8;
1453 0 : break;
1454 : }
1455 0 : rdev->mc.vram_width = numchan * chansize;
1456 : /* Could aperture size report 0? */
1457 0 : rdev->mc.aper_base = rdev->fb_aper_offset;
1458 0 : rdev->mc.aper_size = rdev->fb_aper_size;
1459 : /* Setup GPU memory space */
1460 0 : rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1461 0 : rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1462 0 : rdev->mc.visible_vram_size = rdev->mc.aper_size;
1463 0 : r600_vram_gtt_location(rdev, &rdev->mc);
1464 :
1465 0 : if (rdev->flags & RADEON_IS_IGP) {
1466 0 : rs690_pm_info(rdev);
1467 0 : rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1468 :
1469 0 : if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1470 : /* Use K8 direct mapping for fast fb access. */
1471 0 : rdev->fastfb_working = false;
1472 0 : h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1473 0 : l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1474 0 : k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1475 : #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1476 : if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1477 : #endif
1478 : {
1479 : /* FastFB is only usable with UMA memory; it is simply disabled here
1480 : * when sideport memory is present.
1481 : */
1482 0 : if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1483 : DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1484 : (unsigned long long)rdev->mc.aper_base, k8_addr);
1485 0 : rdev->mc.aper_base = (resource_size_t)k8_addr;
1486 0 : rdev->fastfb_working = true;
1487 0 : }
1488 : }
1489 : }
1490 : }
1491 :
1492 0 : radeon_update_bandwidth_info(rdev);
1493 0 : return 0;
1494 : }
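/*
 * Illustrative sketch, not driver code: the bus-width computation from
 * r600_mc_init() above. Channel size comes from RAMCFG, channel count from
 * CHMAP, and vram_width is their product, so 32-bit channels with CHMAP
 * reporting two channels give a 64-bit bus. The function name is
 * hypothetical.
 */
#if 0
static u32 example_vram_width(u32 ramcfg, u32 chmap)
{
	int chansize = (ramcfg & CHANSIZE_OVERRIDE) ? 16 :
		       (ramcfg & CHANSIZE_MASK) ? 64 : 32;
	/* NOOFCHAN encodes 1/2/4/8 channels as field values 0..3 */
	int numchan = 1 << ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT);

	return numchan * chansize;
}
#endif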
1495 :
1496 0 : int r600_vram_scratch_init(struct radeon_device *rdev)
1497 : {
1498 : int r;
1499 :
1500 0 : if (rdev->vram_scratch.robj == NULL) {
1501 0 : r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1502 : PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1503 : 0, NULL, NULL, &rdev->vram_scratch.robj);
1504 0 : if (r) {
1505 0 : return r;
1506 : }
1507 : }
1508 :
1509 0 : r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1510 0 : if (unlikely(r != 0))
1511 0 : return r;
1512 0 : r = radeon_bo_pin(rdev->vram_scratch.robj,
1513 0 : RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1514 0 : if (r) {
1515 0 : radeon_bo_unreserve(rdev->vram_scratch.robj);
1516 0 : return r;
1517 : }
1518 0 : r = radeon_bo_kmap(rdev->vram_scratch.robj,
1519 0 : (void **)&rdev->vram_scratch.ptr);
1520 0 : if (r)
1521 0 : radeon_bo_unpin(rdev->vram_scratch.robj);
1522 0 : radeon_bo_unreserve(rdev->vram_scratch.robj);
1523 :
1524 0 : return r;
1525 0 : }
1526 :
1527 0 : void r600_vram_scratch_fini(struct radeon_device *rdev)
1528 : {
1529 : int r;
1530 :
1531 0 : if (rdev->vram_scratch.robj == NULL) {
1532 0 : return;
1533 : }
1534 0 : r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1535 0 : if (likely(r == 0)) {
1536 0 : radeon_bo_kunmap(rdev->vram_scratch.robj);
1537 0 : radeon_bo_unpin(rdev->vram_scratch.robj);
1538 0 : radeon_bo_unreserve(rdev->vram_scratch.robj);
1539 0 : }
1540 0 : radeon_bo_unref(&rdev->vram_scratch.robj);
1541 0 : }
1542 :
1543 0 : void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1544 : {
1545 0 : u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1546 :
1547 0 : if (hung)
1548 0 : tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1549 : else
1550 0 : tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1551 :
1552 0 : WREG32(R600_BIOS_3_SCRATCH, tmp);
1553 0 : }
1554 :
1555 0 : static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1556 : {
1557 : dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1558 : RREG32(R_008010_GRBM_STATUS));
1559 : dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1560 : RREG32(R_008014_GRBM_STATUS2));
1561 : dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1562 : RREG32(R_000E50_SRBM_STATUS));
1563 : dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1564 : RREG32(CP_STALLED_STAT1));
1565 : dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1566 : RREG32(CP_STALLED_STAT2));
1567 : dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1568 : RREG32(CP_BUSY_STAT));
1569 : dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1570 : RREG32(CP_STAT));
1571 : dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1572 : RREG32(DMA_STATUS_REG));
1573 0 : }
1574 :
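/* r600_is_display_hung() samples the HV counter of every enabled CRTC, then
 * re-reads the counters up to ten times at 100us intervals. A CRTC whose
 * counter never advances is presumed hung, and the display is reported hung
 * only if every enabled CRTC stays frozen.
 */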
1575 0 : static bool r600_is_display_hung(struct radeon_device *rdev)
1576 : {
1577 : u32 crtc_hung = 0;
1578 0 : u32 crtc_status[2];
1579 : u32 i, j, tmp;
1580 :
1581 0 : for (i = 0; i < rdev->num_crtc; i++) {
1582 0 : if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1583 0 : crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1584 0 : crtc_hung |= (1 << i);
1585 0 : }
1586 : }
1587 :
1588 0 : for (j = 0; j < 10; j++) {
1589 0 : for (i = 0; i < rdev->num_crtc; i++) {
1590 0 : if (crtc_hung & (1 << i)) {
1591 0 : tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1592 0 : if (tmp != crtc_status[i])
1593 0 : crtc_hung &= ~(1 << i);
1594 : }
1595 : }
1596 0 : if (crtc_hung == 0)
1597 0 : return false;
1598 0 : udelay(100);
1599 : }
1600 :
1601 0 : return true;
1602 0 : }
1603 :
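/* r600_gpu_check_soft_reset() translates the GRBM, SRBM and DMA status
 * registers into a RADEON_RESET_* mask describing which blocks appear stuck.
 * An MC-busy indication is deliberately dropped at the end, since the memory
 * controller is usually just busy rather than hung.
 */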
1604 0 : u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1605 : {
1606 : u32 reset_mask = 0;
1607 : u32 tmp;
1608 :
1609 : /* GRBM_STATUS */
1610 0 : tmp = RREG32(R_008010_GRBM_STATUS);
1611 0 : if (rdev->family >= CHIP_RV770) {
1612 0 : if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1613 0 : G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1614 0 : G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1615 0 : G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1616 0 : G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1617 0 : reset_mask |= RADEON_RESET_GFX;
1618 : } else {
1619 0 : if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1620 0 : G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1621 0 : G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1622 0 : G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1623 0 : G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1624 0 : reset_mask |= RADEON_RESET_GFX;
1625 : }
1626 :
1627 0 : if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1628 0 : G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1629 0 : reset_mask |= RADEON_RESET_CP;
1630 :
1631 0 : if (G_008010_GRBM_EE_BUSY(tmp))
1632 0 : reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1633 :
1634 : /* DMA_STATUS_REG */
1635 0 : tmp = RREG32(DMA_STATUS_REG);
1636 0 : if (!(tmp & DMA_IDLE))
1637 0 : reset_mask |= RADEON_RESET_DMA;
1638 :
1639 : /* SRBM_STATUS */
1640 0 : tmp = RREG32(R_000E50_SRBM_STATUS);
1641 0 : if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1642 0 : reset_mask |= RADEON_RESET_RLC;
1643 :
1644 0 : if (G_000E50_IH_BUSY(tmp))
1645 0 : reset_mask |= RADEON_RESET_IH;
1646 :
1647 0 : if (G_000E50_SEM_BUSY(tmp))
1648 0 : reset_mask |= RADEON_RESET_SEM;
1649 :
1650 0 : if (G_000E50_GRBM_RQ_PENDING(tmp))
1651 0 : reset_mask |= RADEON_RESET_GRBM;
1652 :
1653 0 : if (G_000E50_VMC_BUSY(tmp))
1654 0 : reset_mask |= RADEON_RESET_VMC;
1655 :
1656 0 : if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1657 0 : G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1658 0 : G_000E50_MCDW_BUSY(tmp))
1659 0 : reset_mask |= RADEON_RESET_MC;
1660 :
1661 0 : if (r600_is_display_hung(rdev))
1662 0 : reset_mask |= RADEON_RESET_DISPLAY;
1663 :
1664 : /* Skip MC reset as it's most likely not hung, just busy */
1665 0 : if (reset_mask & RADEON_RESET_MC) {
1666 : DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1667 0 : reset_mask &= ~RADEON_RESET_MC;
1668 0 : }
1669 :
1670 0 : return reset_mask;
1671 : }
1672 :
1673 0 : static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1674 : {
1675 0 : struct rv515_mc_save save;
1676 : u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1677 : u32 tmp;
1678 :
1679 0 : if (reset_mask == 0)
1680 0 : return;
1681 :
1682 : dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1683 :
1684 0 : r600_print_gpu_status_regs(rdev);
1685 :
1686 : /* Disable CP parsing/prefetching */
1687 0 : if (rdev->family >= CHIP_RV770)
1688 0 : WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1689 : else
1690 0 : WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1691 :
1692 : /* disable the RLC */
1693 0 : WREG32(RLC_CNTL, 0);
1694 :
1695 0 : if (reset_mask & RADEON_RESET_DMA) {
1696 : /* Disable DMA */
1697 0 : tmp = RREG32(DMA_RB_CNTL);
1698 0 : tmp &= ~DMA_RB_ENABLE;
1699 0 : WREG32(DMA_RB_CNTL, tmp);
1700 0 : }
1701 :
1702 0 : mdelay(50);
1703 :
1704 0 : rv515_mc_stop(rdev, &save);
1705 0 : if (r600_mc_wait_for_idle(rdev)) {
1706 0 : dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1707 0 : }
1708 :
1709 0 : if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1710 0 : if (rdev->family >= CHIP_RV770)
1711 0 : grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1712 : S_008020_SOFT_RESET_CB(1) |
1713 : S_008020_SOFT_RESET_PA(1) |
1714 : S_008020_SOFT_RESET_SC(1) |
1715 : S_008020_SOFT_RESET_SPI(1) |
1716 : S_008020_SOFT_RESET_SX(1) |
1717 : S_008020_SOFT_RESET_SH(1) |
1718 : S_008020_SOFT_RESET_TC(1) |
1719 : S_008020_SOFT_RESET_TA(1) |
1720 : S_008020_SOFT_RESET_VC(1) |
1721 : S_008020_SOFT_RESET_VGT(1);
1722 : else
1723 : grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1724 : S_008020_SOFT_RESET_DB(1) |
1725 : S_008020_SOFT_RESET_CB(1) |
1726 : S_008020_SOFT_RESET_PA(1) |
1727 : S_008020_SOFT_RESET_SC(1) |
1728 : S_008020_SOFT_RESET_SMX(1) |
1729 : S_008020_SOFT_RESET_SPI(1) |
1730 : S_008020_SOFT_RESET_SX(1) |
1731 : S_008020_SOFT_RESET_SH(1) |
1732 : S_008020_SOFT_RESET_TC(1) |
1733 : S_008020_SOFT_RESET_TA(1) |
1734 : S_008020_SOFT_RESET_VC(1) |
1735 : S_008020_SOFT_RESET_VGT(1);
1736 : }
1737 :
1738 0 : if (reset_mask & RADEON_RESET_CP) {
1739 0 : grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1740 : S_008020_SOFT_RESET_VGT(1);
1741 :
1742 : srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1743 0 : }
1744 :
1745 0 : if (reset_mask & RADEON_RESET_DMA) {
1746 0 : if (rdev->family >= CHIP_RV770)
1747 0 : srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1748 : else
1749 0 : srbm_soft_reset |= SOFT_RESET_DMA;
1750 : }
1751 :
1752 0 : if (reset_mask & RADEON_RESET_RLC)
1753 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1754 :
1755 0 : if (reset_mask & RADEON_RESET_SEM)
1756 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1757 :
1758 0 : if (reset_mask & RADEON_RESET_IH)
1759 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1760 :
1761 0 : if (reset_mask & RADEON_RESET_GRBM)
1762 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1763 :
1764 0 : if (!(rdev->flags & RADEON_IS_IGP)) {
1765 0 : if (reset_mask & RADEON_RESET_MC)
1766 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1767 : }
1768 :
1769 0 : if (reset_mask & RADEON_RESET_VMC)
1770 0 : srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1771 :
1772 0 : if (grbm_soft_reset) {
1773 0 : tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1774 0 : tmp |= grbm_soft_reset;
1775 : dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1776 0 : WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1777 0 : tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1778 :
1779 0 : udelay(50);
1780 :
1781 0 : tmp &= ~grbm_soft_reset;
1782 0 : WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1783 0 : tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1784 0 : }
1785 :
1786 0 : if (srbm_soft_reset) {
1787 0 : tmp = RREG32(SRBM_SOFT_RESET);
1788 0 : tmp |= srbm_soft_reset;
1789 : dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1790 0 : WREG32(SRBM_SOFT_RESET, tmp);
1791 0 : tmp = RREG32(SRBM_SOFT_RESET);
1792 :
1793 0 : udelay(50);
1794 :
1795 0 : tmp &= ~srbm_soft_reset;
1796 0 : WREG32(SRBM_SOFT_RESET, tmp);
1797 0 : tmp = RREG32(SRBM_SOFT_RESET);
1798 0 : }
1799 :
1800 : /* Wait a little for things to settle down */
1801 0 : mdelay(1);
1802 :
1803 0 : rv515_mc_resume(rdev, &save);
1804 0 : udelay(50);
1805 :
1806 0 : r600_print_gpu_status_regs(rdev);
1807 0 : }
1808 :
1809 0 : static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1810 : {
1811 0 : struct rv515_mc_save save;
1812 : u32 tmp, i;
1813 :
1814 : dev_info(rdev->dev, "GPU pci config reset\n");
1815 :
1816 : /* disable dpm? */
1817 :
1818 : /* Disable CP parsing/prefetching */
1819 0 : if (rdev->family >= CHIP_RV770)
1820 0 : WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1821 : else
1822 0 : WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1823 :
1824 : /* disable the RLC */
1825 0 : WREG32(RLC_CNTL, 0);
1826 :
1827 : /* Disable DMA */
1828 0 : tmp = RREG32(DMA_RB_CNTL);
1829 0 : tmp &= ~DMA_RB_ENABLE;
1830 0 : WREG32(DMA_RB_CNTL, tmp);
1831 :
1832 0 : mdelay(50);
1833 :
1834 : /* set mclk/sclk to bypass */
1835 0 : if (rdev->family >= CHIP_RV770)
1836 0 : rv770_set_clk_bypass_mode(rdev);
1837 : /* disable BM */
1838 : pci_clear_master(rdev->pdev);
1839 : /* disable mem access */
1840 0 : rv515_mc_stop(rdev, &save);
1841 0 : if (r600_mc_wait_for_idle(rdev)) {
1842 0 : dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1843 0 : }
1844 :
1845 : /* BIF reset workaround. Not sure if this is needed on 6xx */
1846 0 : tmp = RREG32(BUS_CNTL);
1847 0 : tmp |= VGA_COHE_SPEC_TIMER_DIS;
1848 0 : WREG32(BUS_CNTL, tmp);
1849 :
1850 0 : tmp = RREG32(BIF_SCRATCH0);
1851 :
1852 : /* reset */
1853 0 : radeon_pci_config_reset(rdev);
1854 0 : mdelay(1);
1855 :
1856 : /* BIF reset workaround. Not sure if this is needed on 6xx */
1857 : tmp = SOFT_RESET_BIF;
1858 0 : WREG32(SRBM_SOFT_RESET, tmp);
1859 0 : mdelay(1);
1860 0 : WREG32(SRBM_SOFT_RESET, 0);
1861 :
1862 : /* wait for asic to come out of reset */
1863 0 : for (i = 0; i < rdev->usec_timeout; i++) {
1864 0 : if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1865 : break;
1866 0 : udelay(1);
1867 : }
1868 0 : }
1869 :
1870 0 : int r600_asic_reset(struct radeon_device *rdev)
1871 : {
1872 : u32 reset_mask;
1873 :
1874 0 : reset_mask = r600_gpu_check_soft_reset(rdev);
1875 :
1876 0 : if (reset_mask)
1877 0 : r600_set_bios_scratch_engine_hung(rdev, true);
1878 :
1879 : /* try soft reset */
1880 0 : r600_gpu_soft_reset(rdev, reset_mask);
1881 :
1882 0 : reset_mask = r600_gpu_check_soft_reset(rdev);
1883 :
1884 : /* try pci config reset */
1885 0 : if (reset_mask && radeon_hard_reset)
1886 0 : r600_gpu_pci_config_reset(rdev);
1887 :
1888 0 : reset_mask = r600_gpu_check_soft_reset(rdev);
1889 :
1890 0 : if (!reset_mask)
1891 0 : r600_set_bios_scratch_engine_hung(rdev, false);
1892 :
1893 0 : return 0;
1894 : }
1895 :
1896 : /**
1897 : * r600_gfx_is_lockup - Check if the GFX engine is locked up
1898 : *
1899 : * @rdev: radeon_device pointer
1900 : * @ring: radeon_ring structure holding ring information
1901 : *
1902 : * Check if the GFX engine is locked up.
1903 : * Returns true if the engine appears to be locked up, false if not.
1904 : */
1905 0 : bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1906 : {
1907 0 : u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1908 :
1909 0 : if (!(reset_mask & (RADEON_RESET_GFX |
1910 : RADEON_RESET_COMPUTE |
1911 : RADEON_RESET_CP))) {
1912 0 : radeon_ring_lockup_update(rdev, ring);
1913 0 : return false;
1914 : }
1915 0 : return radeon_ring_test_lockup(rdev, ring);
1916 0 : }
1917 :
1918 0 : u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1919 : u32 tiling_pipe_num,
1920 : u32 max_rb_num,
1921 : u32 total_max_rb_num,
1922 : u32 disabled_rb_mask)
1923 : {
1924 : u32 rendering_pipe_num, rb_num_width, req_rb_num;
1925 : u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1926 0 : u32 data = 0, mask = 1 << (max_rb_num - 1);
1927 : unsigned i, j;
1928 :
1929 : /* mask out the RBs that don't exist on that asic */
1930 0 : tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1931 : /* make sure at least one RB is available */
1932 0 : if ((tmp & 0xff) != 0xff)
1933 0 : disabled_rb_mask = tmp;
1934 :
1935 0 : rendering_pipe_num = 1 << tiling_pipe_num;
1936 0 : req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1937 0 : BUG_ON(rendering_pipe_num < req_rb_num);
1938 :
1939 0 : pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1940 0 : pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1941 :
1942 0 : if (rdev->family <= CHIP_RV740) {
1943 : /* r6xx/r7xx */
1944 : rb_num_width = 2;
1945 0 : } else {
1946 : /* eg+ */
1947 : rb_num_width = 4;
1948 : }
1949 :
1950 0 : for (i = 0; i < max_rb_num; i++) {
1951 0 : if (!(mask & disabled_rb_mask)) {
1952 0 : for (j = 0; j < pipe_rb_ratio; j++) {
1953 0 : data <<= rb_num_width;
1954 0 : data |= max_rb_num - i - 1;
1955 : }
1956 0 : if (pipe_rb_remain) {
1957 0 : data <<= rb_num_width;
1958 0 : data |= max_rb_num - i - 1;
1959 0 : pipe_rb_remain--;
1960 0 : }
1961 : }
1962 0 : mask >>= 1;
1963 : }
1964 :
1965 0 : return data;
1966 : }
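/* Rough worked example of the remap above: with tiling_pipe_num = 2 (four
 * rendering pipes), max_rb_num = 4 on r6xx (2-bit fields, total_max_rb_num
 * = 8) and only RB1 disabled, req_rb_num = 3, pipe_rb_ratio = 1 and
 * pipe_rb_remain = 1, so the enabled backends are packed MSB-first as
 * 3, 3, 2, 0 (data = 0xf8); the leftover pipe reuses the highest enabled
 * backend.
 */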
1967 :
1968 0 : int r600_count_pipe_bits(uint32_t val)
1969 : {
1970 0 : return hweight32(val);
1971 : }
1972 :
1973 0 : static void r600_gpu_init(struct radeon_device *rdev)
1974 : {
1975 : u32 tiling_config;
1976 : u32 ramcfg;
1977 : u32 cc_gc_shader_pipe_config;
1978 : u32 tmp;
1979 : int i, j;
1980 : u32 sq_config;
1981 : u32 sq_gpr_resource_mgmt_1 = 0;
1982 : u32 sq_gpr_resource_mgmt_2 = 0;
1983 : u32 sq_thread_resource_mgmt = 0;
1984 : u32 sq_stack_resource_mgmt_1 = 0;
1985 : u32 sq_stack_resource_mgmt_2 = 0;
1986 : u32 disabled_rb_mask;
1987 :
1988 0 : rdev->config.r600.tiling_group_size = 256;
1989 0 : switch (rdev->family) {
1990 : case CHIP_R600:
1991 0 : rdev->config.r600.max_pipes = 4;
1992 0 : rdev->config.r600.max_tile_pipes = 8;
1993 0 : rdev->config.r600.max_simds = 4;
1994 0 : rdev->config.r600.max_backends = 4;
1995 0 : rdev->config.r600.max_gprs = 256;
1996 0 : rdev->config.r600.max_threads = 192;
1997 0 : rdev->config.r600.max_stack_entries = 256;
1998 0 : rdev->config.r600.max_hw_contexts = 8;
1999 0 : rdev->config.r600.max_gs_threads = 16;
2000 0 : rdev->config.r600.sx_max_export_size = 128;
2001 0 : rdev->config.r600.sx_max_export_pos_size = 16;
2002 0 : rdev->config.r600.sx_max_export_smx_size = 128;
2003 0 : rdev->config.r600.sq_num_cf_insts = 2;
2004 0 : break;
2005 : case CHIP_RV630:
2006 : case CHIP_RV635:
2007 0 : rdev->config.r600.max_pipes = 2;
2008 0 : rdev->config.r600.max_tile_pipes = 2;
2009 0 : rdev->config.r600.max_simds = 3;
2010 0 : rdev->config.r600.max_backends = 1;
2011 0 : rdev->config.r600.max_gprs = 128;
2012 0 : rdev->config.r600.max_threads = 192;
2013 0 : rdev->config.r600.max_stack_entries = 128;
2014 0 : rdev->config.r600.max_hw_contexts = 8;
2015 0 : rdev->config.r600.max_gs_threads = 4;
2016 0 : rdev->config.r600.sx_max_export_size = 128;
2017 0 : rdev->config.r600.sx_max_export_pos_size = 16;
2018 0 : rdev->config.r600.sx_max_export_smx_size = 128;
2019 0 : rdev->config.r600.sq_num_cf_insts = 2;
2020 0 : break;
2021 : case CHIP_RV610:
2022 : case CHIP_RV620:
2023 : case CHIP_RS780:
2024 : case CHIP_RS880:
2025 0 : rdev->config.r600.max_pipes = 1;
2026 0 : rdev->config.r600.max_tile_pipes = 1;
2027 0 : rdev->config.r600.max_simds = 2;
2028 0 : rdev->config.r600.max_backends = 1;
2029 0 : rdev->config.r600.max_gprs = 128;
2030 0 : rdev->config.r600.max_threads = 192;
2031 0 : rdev->config.r600.max_stack_entries = 128;
2032 0 : rdev->config.r600.max_hw_contexts = 4;
2033 0 : rdev->config.r600.max_gs_threads = 4;
2034 0 : rdev->config.r600.sx_max_export_size = 128;
2035 0 : rdev->config.r600.sx_max_export_pos_size = 16;
2036 0 : rdev->config.r600.sx_max_export_smx_size = 128;
2037 0 : rdev->config.r600.sq_num_cf_insts = 1;
2038 0 : break;
2039 : case CHIP_RV670:
2040 0 : rdev->config.r600.max_pipes = 4;
2041 0 : rdev->config.r600.max_tile_pipes = 4;
2042 0 : rdev->config.r600.max_simds = 4;
2043 0 : rdev->config.r600.max_backends = 4;
2044 0 : rdev->config.r600.max_gprs = 192;
2045 0 : rdev->config.r600.max_threads = 192;
2046 0 : rdev->config.r600.max_stack_entries = 256;
2047 0 : rdev->config.r600.max_hw_contexts = 8;
2048 0 : rdev->config.r600.max_gs_threads = 16;
2049 0 : rdev->config.r600.sx_max_export_size = 128;
2050 0 : rdev->config.r600.sx_max_export_pos_size = 16;
2051 0 : rdev->config.r600.sx_max_export_smx_size = 128;
2052 0 : rdev->config.r600.sq_num_cf_insts = 2;
2053 0 : break;
2054 : default:
2055 : break;
2056 : }
2057 :
2058 : /* Initialize HDP */
2059 0 : for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2060 0 : WREG32((0x2c14 + j), 0x00000000);
2061 0 : WREG32((0x2c18 + j), 0x00000000);
2062 0 : WREG32((0x2c1c + j), 0x00000000);
2063 0 : WREG32((0x2c20 + j), 0x00000000);
2064 0 : WREG32((0x2c24 + j), 0x00000000);
2065 : }
2066 :
2067 0 : WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2068 :
2069 : /* Setup tiling */
2070 : tiling_config = 0;
2071 0 : ramcfg = RREG32(RAMCFG);
2072 0 : switch (rdev->config.r600.max_tile_pipes) {
2073 : case 1:
2074 : tiling_config |= PIPE_TILING(0);
2075 0 : break;
2076 : case 2:
2077 : tiling_config |= PIPE_TILING(1);
2078 0 : break;
2079 : case 4:
2080 : tiling_config |= PIPE_TILING(2);
2081 0 : break;
2082 : case 8:
2083 : tiling_config |= PIPE_TILING(3);
2084 0 : break;
2085 : default:
2086 : break;
2087 : }
2088 0 : rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
2089 0 : rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2090 0 : tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2091 0 : tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2092 :
2093 0 : tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
2094 0 : if (tmp > 3) {
2095 0 : tiling_config |= ROW_TILING(3);
2096 0 : tiling_config |= SAMPLE_SPLIT(3);
2097 0 : } else {
2098 0 : tiling_config |= ROW_TILING(tmp);
2099 0 : tiling_config |= SAMPLE_SPLIT(tmp);
2100 : }
2101 0 : tiling_config |= BANK_SWAPS(1);
2102 :
2103 0 : cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
2104 0 : tmp = rdev->config.r600.max_simds -
2105 0 : r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
2106 0 : rdev->config.r600.active_simds = tmp;
2107 :
2108 0 : disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
2109 : tmp = 0;
2110 0 : for (i = 0; i < rdev->config.r600.max_backends; i++)
2111 0 : tmp |= (1 << i);
2112 : /* if all the backends are disabled, fix it up here */
2113 0 : if ((disabled_rb_mask & tmp) == tmp) {
2114 0 : for (i = 0; i < rdev->config.r600.max_backends; i++)
2115 0 : disabled_rb_mask &= ~(1 << i);
2116 : }
2117 0 : tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
2118 0 : tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
2119 : R6XX_MAX_BACKENDS, disabled_rb_mask);
2120 0 : tiling_config |= tmp << 16;
2121 0 : rdev->config.r600.backend_map = tmp;
2122 :
2123 0 : rdev->config.r600.tile_config = tiling_config;
2124 0 : WREG32(GB_TILING_CONFIG, tiling_config);
2125 0 : WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
2126 0 : WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
2127 0 : WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
2128 :
2129 0 : tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
2130 0 : WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2131 0 : WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2132 :
2133 : /* Setup some CP states */
2134 0 : WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2135 0 : WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2136 :
2137 0 : WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2138 : SYNC_WALKER | SYNC_ALIGNER));
2139 : /* Setup various GPU states */
2140 0 : if (rdev->family == CHIP_RV670)
2141 0 : WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2142 :
2143 0 : tmp = RREG32(SX_DEBUG_1);
2144 0 : tmp |= SMX_EVENT_RELEASE;
2145 0 : if ((rdev->family > CHIP_R600))
2146 0 : tmp |= ENABLE_NEW_SMX_ADDRESS;
2147 0 : WREG32(SX_DEBUG_1, tmp);
2148 :
2149 0 : if (((rdev->family) == CHIP_R600) ||
2150 0 : ((rdev->family) == CHIP_RV630) ||
2151 0 : ((rdev->family) == CHIP_RV610) ||
2152 0 : ((rdev->family) == CHIP_RV620) ||
2153 0 : ((rdev->family) == CHIP_RS780) ||
2154 0 : ((rdev->family) == CHIP_RS880)) {
2155 0 : WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2156 0 : } else {
2157 0 : WREG32(DB_DEBUG, 0);
2158 : }
2159 0 : WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2160 : DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2161 :
2162 0 : WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2163 0 : WREG32(VGT_NUM_INSTANCES, 0);
2164 :
2165 0 : WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2166 0 : WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2167 :
2168 0 : tmp = RREG32(SQ_MS_FIFO_SIZES);
2169 0 : if (((rdev->family) == CHIP_RV610) ||
2170 0 : ((rdev->family) == CHIP_RV620) ||
2171 0 : ((rdev->family) == CHIP_RS780) ||
2172 0 : ((rdev->family) == CHIP_RS880)) {
2173 : tmp = (CACHE_FIFO_SIZE(0xa) |
2174 : FETCH_FIFO_HIWATER(0xa) |
2175 : DONE_FIFO_HIWATER(0xe0) |
2176 : ALU_UPDATE_FIFO_HIWATER(0x8));
2177 0 : } else if (((rdev->family) == CHIP_R600) ||
2178 0 : ((rdev->family) == CHIP_RV630)) {
2179 0 : tmp &= ~DONE_FIFO_HIWATER(0xff);
2180 0 : tmp |= DONE_FIFO_HIWATER(0x4);
2181 0 : }
2182 0 : WREG32(SQ_MS_FIFO_SIZES, tmp);
2183 :
2184 : /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2185 : * should be adjusted as needed by the 2D/3D drivers. This just sets default values
2186 : */
2187 0 : sq_config = RREG32(SQ_CONFIG);
2188 0 : sq_config &= ~(PS_PRIO(3) |
2189 : VS_PRIO(3) |
2190 : GS_PRIO(3) |
2191 : ES_PRIO(3));
2192 0 : sq_config |= (DX9_CONSTS |
2193 : VC_ENABLE |
2194 : PS_PRIO(0) |
2195 : VS_PRIO(1) |
2196 : GS_PRIO(2) |
2197 : ES_PRIO(3));
2198 :
2199 0 : if ((rdev->family) == CHIP_R600) {
2200 : sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2201 : NUM_VS_GPRS(124) |
2202 : NUM_CLAUSE_TEMP_GPRS(4));
2203 : sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2204 : NUM_ES_GPRS(0));
2205 : sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2206 : NUM_VS_THREADS(48) |
2207 : NUM_GS_THREADS(4) |
2208 : NUM_ES_THREADS(4));
2209 : sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2210 : NUM_VS_STACK_ENTRIES(128));
2211 : sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2212 : NUM_ES_STACK_ENTRIES(0));
2213 0 : } else if (((rdev->family) == CHIP_RV610) ||
2214 0 : ((rdev->family) == CHIP_RV620) ||
2215 0 : ((rdev->family) == CHIP_RS780) ||
2216 0 : ((rdev->family) == CHIP_RS880)) {
2217 : /* no vertex cache */
2218 0 : sq_config &= ~VC_ENABLE;
2219 :
2220 : sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2221 : NUM_VS_GPRS(44) |
2222 : NUM_CLAUSE_TEMP_GPRS(2));
2223 : sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2224 : NUM_ES_GPRS(17));
2225 : sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2226 : NUM_VS_THREADS(78) |
2227 : NUM_GS_THREADS(4) |
2228 : NUM_ES_THREADS(31));
2229 : sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2230 : NUM_VS_STACK_ENTRIES(40));
2231 : sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2232 : NUM_ES_STACK_ENTRIES(16));
2233 0 : } else if (((rdev->family) == CHIP_RV630) ||
2234 0 : ((rdev->family) == CHIP_RV635)) {
2235 : sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2236 : NUM_VS_GPRS(44) |
2237 : NUM_CLAUSE_TEMP_GPRS(2));
2238 : sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2239 : NUM_ES_GPRS(18));
2240 : sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2241 : NUM_VS_THREADS(78) |
2242 : NUM_GS_THREADS(4) |
2243 : NUM_ES_THREADS(31));
2244 : sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2245 : NUM_VS_STACK_ENTRIES(40));
2246 : sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2247 : NUM_ES_STACK_ENTRIES(16));
2248 0 : } else if ((rdev->family) == CHIP_RV670) {
2249 : sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2250 : NUM_VS_GPRS(44) |
2251 : NUM_CLAUSE_TEMP_GPRS(2));
2252 : sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2253 : NUM_ES_GPRS(17));
2254 : sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2255 : NUM_VS_THREADS(78) |
2256 : NUM_GS_THREADS(4) |
2257 : NUM_ES_THREADS(31));
2258 : sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2259 : NUM_VS_STACK_ENTRIES(64));
2260 : sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2261 : NUM_ES_STACK_ENTRIES(64));
2262 0 : }
2263 :
2264 0 : WREG32(SQ_CONFIG, sq_config);
2265 0 : WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2266 0 : WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2267 0 : WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2268 0 : WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2269 0 : WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2270 :
2271 0 : if (((rdev->family) == CHIP_RV610) ||
2272 0 : ((rdev->family) == CHIP_RV620) ||
2273 0 : ((rdev->family) == CHIP_RS780) ||
2274 0 : ((rdev->family) == CHIP_RS880)) {
2275 0 : WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2276 0 : } else {
2277 0 : WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2278 : }
2279 :
2280 : /* More default values. 2D/3D driver should adjust as needed */
2281 0 : WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2282 : S1_X(0x4) | S1_Y(0xc)));
2283 0 : WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2284 : S1_X(0x2) | S1_Y(0x2) |
2285 : S2_X(0xa) | S2_Y(0x6) |
2286 : S3_X(0x6) | S3_Y(0xa)));
2287 0 : WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2288 : S1_X(0x4) | S1_Y(0xc) |
2289 : S2_X(0x1) | S2_Y(0x6) |
2290 : S3_X(0xa) | S3_Y(0xe)));
2291 0 : WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2292 : S5_X(0x0) | S5_Y(0x0) |
2293 : S6_X(0xb) | S6_Y(0x4) |
2294 : S7_X(0x7) | S7_Y(0x8)));
2295 :
2296 0 : WREG32(VGT_STRMOUT_EN, 0);
2297 0 : tmp = rdev->config.r600.max_pipes * 16;
2298 0 : switch (rdev->family) {
2299 : case CHIP_RV610:
2300 : case CHIP_RV620:
2301 : case CHIP_RS780:
2302 : case CHIP_RS880:
2303 0 : tmp += 32;
2304 0 : break;
2305 : case CHIP_RV670:
2306 0 : tmp += 128;
2307 0 : break;
2308 : default:
2309 : break;
2310 : }
2311 0 : if (tmp > 256) {
2312 : tmp = 256;
2313 0 : }
2314 0 : WREG32(VGT_ES_PER_GS, 128);
2315 0 : WREG32(VGT_GS_PER_ES, tmp);
2316 0 : WREG32(VGT_GS_PER_VS, 2);
2317 0 : WREG32(VGT_GS_VERTEX_REUSE, 16);
2318 :
2319 : /* more default values. 2D/3D driver should adjust as needed */
2320 0 : WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2321 0 : WREG32(VGT_STRMOUT_EN, 0);
2322 0 : WREG32(SX_MISC, 0);
2323 0 : WREG32(PA_SC_MODE_CNTL, 0);
2324 0 : WREG32(PA_SC_AA_CONFIG, 0);
2325 0 : WREG32(PA_SC_LINE_STIPPLE, 0);
2326 0 : WREG32(SPI_INPUT_Z, 0);
2327 0 : WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2328 0 : WREG32(CB_COLOR7_FRAG, 0);
2329 :
2330 : /* Clear render buffer base addresses */
2331 0 : WREG32(CB_COLOR0_BASE, 0);
2332 0 : WREG32(CB_COLOR1_BASE, 0);
2333 0 : WREG32(CB_COLOR2_BASE, 0);
2334 0 : WREG32(CB_COLOR3_BASE, 0);
2335 0 : WREG32(CB_COLOR4_BASE, 0);
2336 0 : WREG32(CB_COLOR5_BASE, 0);
2337 0 : WREG32(CB_COLOR6_BASE, 0);
2338 0 : WREG32(CB_COLOR7_BASE, 0);
2339 0 : WREG32(CB_COLOR7_FRAG, 0);
2340 :
2341 0 : switch (rdev->family) {
2342 : case CHIP_RV610:
2343 : case CHIP_RV620:
2344 : case CHIP_RS780:
2345 : case CHIP_RS880:
2346 : tmp = TC_L2_SIZE(8);
2347 0 : break;
2348 : case CHIP_RV630:
2349 : case CHIP_RV635:
2350 : tmp = TC_L2_SIZE(4);
2351 0 : break;
2352 : case CHIP_R600:
2353 : tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2354 0 : break;
2355 : default:
2356 : tmp = TC_L2_SIZE(0);
2357 0 : break;
2358 : }
2359 0 : WREG32(TC_CNTL, tmp);
2360 :
2361 0 : tmp = RREG32(HDP_HOST_PATH_CNTL);
2362 0 : WREG32(HDP_HOST_PATH_CNTL, tmp);
2363 :
2364 0 : tmp = RREG32(ARB_POP);
2365 0 : tmp |= ENABLE_TC128;
2366 0 : WREG32(ARB_POP, tmp);
2367 :
2368 0 : WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2369 0 : WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2370 : NUM_CLIP_SEQ(3)));
2371 0 : WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2372 0 : WREG32(VC_ENHANCE, 0);
2373 0 : }
2374 :
2375 :
2376 : /*
2377 : * Indirect registers accessor
2378 : */
2379 0 : u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2380 : {
2381 : unsigned long flags;
2382 : u32 r;
2383 :
2384 0 : spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2385 0 : WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2386 0 : (void)RREG32(PCIE_PORT_INDEX);
2387 0 : r = RREG32(PCIE_PORT_DATA);
2388 0 : spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2389 0 : return r;
2390 : }
2391 :
2392 0 : void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2393 : {
2394 : unsigned long flags;
2395 :
2396 0 : spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2397 0 : WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2398 0 : (void)RREG32(PCIE_PORT_INDEX);
2399 0 : WREG32(PCIE_PORT_DATA, (v));
2400 0 : (void)RREG32(PCIE_PORT_DATA);
2401 0 : spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2402 0 : }
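/*
 * Illustrative sketch, not driver code: the index/data pattern the two
 * accessors above follow. Write the INDEX register, read it back to flush
 * the posted write, then go through the DATA window. The function name and
 * parameters are hypothetical.
 */
#if 0
static u32 example_indexed_read(u32 index_reg, u32 data_reg, u32 reg)
{
	WREG32(index_reg, reg & 0xff);	/* select the target register */
	(void)RREG32(index_reg);	/* flush the posted index write */
	return RREG32(data_reg);	/* read through the data window */
}
#endif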
2403 :
2404 : /*
2405 : * CP & Ring
2406 : */
2407 0 : void r600_cp_stop(struct radeon_device *rdev)
2408 : {
2409 0 : if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2410 0 : radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2411 0 : WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2412 0 : WREG32(SCRATCH_UMSK, 0);
2413 0 : rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2414 0 : }
2415 :
2416 0 : int r600_init_microcode(struct radeon_device *rdev)
2417 : {
2418 : const char *chip_name;
2419 : const char *rlc_chip_name;
2420 : const char *smc_chip_name = "RV770";
2421 : size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2422 0 : char fw_name[30];
2423 : int err;
2424 :
2425 : DRM_DEBUG("\n");
2426 :
2427 0 : switch (rdev->family) {
2428 : case CHIP_R600:
2429 : chip_name = "R600";
2430 : rlc_chip_name = "R600";
2431 0 : break;
2432 : case CHIP_RV610:
2433 : chip_name = "RV610";
2434 : rlc_chip_name = "R600";
2435 0 : break;
2436 : case CHIP_RV630:
2437 : chip_name = "RV630";
2438 : rlc_chip_name = "R600";
2439 0 : break;
2440 : case CHIP_RV620:
2441 : chip_name = "RV620";
2442 : rlc_chip_name = "R600";
2443 0 : break;
2444 : case CHIP_RV635:
2445 : chip_name = "RV635";
2446 : rlc_chip_name = "R600";
2447 0 : break;
2448 : case CHIP_RV670:
2449 : chip_name = "RV670";
2450 : rlc_chip_name = "R600";
2451 0 : break;
2452 : case CHIP_RS780:
2453 : case CHIP_RS880:
2454 : chip_name = "RS780";
2455 : rlc_chip_name = "R600";
2456 0 : break;
2457 : case CHIP_RV770:
2458 : chip_name = "RV770";
2459 : rlc_chip_name = "R700";
2460 : smc_chip_name = "RV770";
2461 : smc_req_size = roundup2(RV770_SMC_UCODE_SIZE, 4);
2462 0 : break;
2463 : case CHIP_RV730:
2464 : chip_name = "RV730";
2465 : rlc_chip_name = "R700";
2466 : smc_chip_name = "RV730";
2467 : smc_req_size = roundup2(RV730_SMC_UCODE_SIZE, 4);
2468 0 : break;
2469 : case CHIP_RV710:
2470 : chip_name = "RV710";
2471 : rlc_chip_name = "R700";
2472 : smc_chip_name = "RV710";
2473 : smc_req_size = roundup2(RV710_SMC_UCODE_SIZE, 4);
2474 0 : break;
2475 : case CHIP_RV740:
2476 : chip_name = "RV730";
2477 : rlc_chip_name = "R700";
2478 : smc_chip_name = "RV740";
2479 : smc_req_size = roundup2(RV740_SMC_UCODE_SIZE, 4);
2480 0 : break;
2481 : case CHIP_CEDAR:
2482 : chip_name = "CEDAR";
2483 : rlc_chip_name = "CEDAR";
2484 : smc_chip_name = "CEDAR";
2485 : smc_req_size = roundup2(CEDAR_SMC_UCODE_SIZE, 4);
2486 0 : break;
2487 : case CHIP_REDWOOD:
2488 : chip_name = "REDWOOD";
2489 : rlc_chip_name = "REDWOOD";
2490 : smc_chip_name = "REDWOOD";
2491 : smc_req_size = roundup2(REDWOOD_SMC_UCODE_SIZE, 4);
2492 0 : break;
2493 : case CHIP_JUNIPER:
2494 : chip_name = "JUNIPER";
2495 : rlc_chip_name = "JUNIPER";
2496 : smc_chip_name = "JUNIPER";
2497 : smc_req_size = roundup2(JUNIPER_SMC_UCODE_SIZE, 4);
2498 0 : break;
2499 : case CHIP_CYPRESS:
2500 : case CHIP_HEMLOCK:
2501 : chip_name = "CYPRESS";
2502 : rlc_chip_name = "CYPRESS";
2503 : smc_chip_name = "CYPRESS";
2504 : smc_req_size = roundup2(CYPRESS_SMC_UCODE_SIZE, 4);
2505 0 : break;
2506 : case CHIP_PALM:
2507 : chip_name = "PALM";
2508 : rlc_chip_name = "SUMO";
2509 0 : break;
2510 : case CHIP_SUMO:
2511 : chip_name = "SUMO";
2512 : rlc_chip_name = "SUMO";
2513 0 : break;
2514 : case CHIP_SUMO2:
2515 : chip_name = "SUMO2";
2516 : rlc_chip_name = "SUMO";
2517 0 : break;
2518 0 : default: BUG();
2519 : }
2520 :
2521 0 : if (rdev->family >= CHIP_CEDAR) {
2522 : pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2523 : me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2524 : rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2525 0 : } else if (rdev->family >= CHIP_RV770) {
2526 : pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2527 : me_req_size = R700_PM4_UCODE_SIZE * 4;
2528 : rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2529 0 : } else {
2530 : pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2531 : me_req_size = R600_PM4_UCODE_SIZE * 12;
2532 : rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2533 : }
2534 :
2535 : DRM_INFO("Loading %s Microcode\n", chip_name);
2536 :
2537 0 : snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2538 0 : err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2539 0 : if (err)
2540 : goto out;
2541 0 : if (rdev->pfp_fw->size != pfp_req_size) {
2542 0 : printk(KERN_ERR
2543 : "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2544 : rdev->pfp_fw->size, fw_name);
2545 : err = -EINVAL;
2546 0 : goto out;
2547 : }
2548 :
2549 0 : snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2550 0 : err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2551 0 : if (err)
2552 : goto out;
2553 0 : if (rdev->me_fw->size != me_req_size) {
2554 0 : printk(KERN_ERR
2555 : "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2556 : rdev->me_fw->size, fw_name);
2557 : err = -EINVAL;
2558 0 : }
2559 :
2560 0 : snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2561 0 : err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2562 0 : if (err)
2563 : goto out;
2564 0 : if (rdev->rlc_fw->size != rlc_req_size) {
2565 0 : printk(KERN_ERR
2566 : "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2567 : rdev->rlc_fw->size, fw_name);
2568 : err = -EINVAL;
2569 0 : }
2570 :
2571 0 : if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2572 0 : snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2573 0 : err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2574 0 : if (err) {
2575 0 : printk(KERN_ERR
2576 : "smc: error loading firmware \"%s\"\n",
2577 : fw_name);
2578 0 : release_firmware(rdev->smc_fw);
2579 0 : rdev->smc_fw = NULL;
2580 : err = 0;
2581 0 : } else if (rdev->smc_fw->size != smc_req_size) {
2582 0 : printk(KERN_ERR
2583 : "smc: Bogus length %zu in firmware \"%s\"\n",
2584 : rdev->smc_fw->size, fw_name);
2585 : err = -EINVAL;
2586 0 : }
2587 : }
2588 :
2589 : out:
2590 0 : if (err) {
2591 0 : if (err != -EINVAL)
2592 0 : printk(KERN_ERR
2593 : "r600_cp: Failed to load firmware \"%s\"\n",
2594 : fw_name);
2595 0 : release_firmware(rdev->pfp_fw);
2596 0 : rdev->pfp_fw = NULL;
2597 0 : release_firmware(rdev->me_fw);
2598 0 : rdev->me_fw = NULL;
2599 0 : release_firmware(rdev->rlc_fw);
2600 0 : rdev->rlc_fw = NULL;
2601 0 : release_firmware(rdev->smc_fw);
2602 0 : rdev->smc_fw = NULL;
2603 0 : }
2604 0 : return err;
2605 0 : }
2606 :
2607 0 : u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2608 : struct radeon_ring *ring)
2609 : {
2610 : u32 rptr;
2611 :
2612 0 : if (rdev->wb.enabled)
2613 0 : rptr = rdev->wb.wb[ring->rptr_offs/4];
2614 : else
2615 0 : rptr = RREG32(R600_CP_RB_RPTR);
2616 :
2617 0 : return rptr;
2618 : }
2619 :
2620 0 : u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2621 : struct radeon_ring *ring)
2622 : {
2623 : u32 wptr;
2624 :
2625 0 : wptr = RREG32(R600_CP_RB_WPTR);
2626 :
2627 0 : return wptr;
2628 : }
2629 :
2630 0 : void r600_gfx_set_wptr(struct radeon_device *rdev,
2631 : struct radeon_ring *ring)
2632 : {
2633 0 : WREG32(R600_CP_RB_WPTR, ring->wptr);
2634 0 : (void)RREG32(R600_CP_RB_WPTR);
2635 0 : }
2636 :
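/* r600_cp_load_microcode() below soft-resets the CP, then streams the
 * big-endian ME image through CP_ME_RAM_WADDR/CP_ME_RAM_DATA and the PFP
 * image through CP_PFP_UCODE_ADDR/CP_PFP_UCODE_DATA, resetting the write
 * pointers to 0 when done.
 */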
2637 0 : static int r600_cp_load_microcode(struct radeon_device *rdev)
2638 : {
2639 : const __be32 *fw_data;
2640 : int i;
2641 :
2642 0 : if (!rdev->me_fw || !rdev->pfp_fw)
2643 0 : return -EINVAL;
2644 :
2645 0 : r600_cp_stop(rdev);
2646 :
2647 0 : WREG32(CP_RB_CNTL,
2648 : #ifdef __BIG_ENDIAN
2649 : BUF_SWAP_32BIT |
2650 : #endif
2651 : RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2652 :
2653 : /* Reset cp */
2654 0 : WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2655 0 : RREG32(GRBM_SOFT_RESET);
2656 0 : mdelay(15);
2657 0 : WREG32(GRBM_SOFT_RESET, 0);
2658 :
2659 0 : WREG32(CP_ME_RAM_WADDR, 0);
2660 :
2661 0 : fw_data = (const __be32 *)rdev->me_fw->data;
2662 0 : WREG32(CP_ME_RAM_WADDR, 0);
2663 0 : for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2664 0 : WREG32(CP_ME_RAM_DATA,
2665 : be32_to_cpup(fw_data++));
2666 :
2667 0 : fw_data = (const __be32 *)rdev->pfp_fw->data;
2668 0 : WREG32(CP_PFP_UCODE_ADDR, 0);
2669 0 : for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2670 0 : WREG32(CP_PFP_UCODE_DATA,
2671 : be32_to_cpup(fw_data++));
2672 :
2673 0 : WREG32(CP_PFP_UCODE_ADDR, 0);
2674 0 : WREG32(CP_ME_RAM_WADDR, 0);
2675 0 : WREG32(CP_ME_RAM_RADDR, 0);
2676 0 : return 0;
2677 0 : }
2678 :
2679 0 : int r600_cp_start(struct radeon_device *rdev)
2680 : {
2681 0 : struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2682 : int r;
2683 : uint32_t cp_me;
2684 :
2685 0 : r = radeon_ring_lock(rdev, ring, 7);
2686 0 : if (r) {
2687 0 : DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2688 0 : return r;
2689 : }
2690 0 : radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2691 0 : radeon_ring_write(ring, 0x1);
2692 0 : if (rdev->family >= CHIP_RV770) {
2693 0 : radeon_ring_write(ring, 0x0);
2694 0 : radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2695 0 : } else {
2696 0 : radeon_ring_write(ring, 0x3);
2697 0 : radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2698 : }
2699 0 : radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2700 0 : radeon_ring_write(ring, 0);
2701 0 : radeon_ring_write(ring, 0);
2702 0 : radeon_ring_unlock_commit(rdev, ring, false);
2703 :
2704 : cp_me = 0xff;
2705 0 : WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2706 0 : return 0;
2707 0 : }
2708 :
2709 0 : int r600_cp_resume(struct radeon_device *rdev)
2710 : {
2711 0 : struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2712 : u32 tmp;
2713 : u32 rb_bufsz;
2714 : int r;
2715 :
2716 : /* Reset cp */
2717 0 : WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2718 0 : RREG32(GRBM_SOFT_RESET);
2719 0 : mdelay(15);
2720 0 : WREG32(GRBM_SOFT_RESET, 0);
2721 :
2722 : /* Set ring buffer size */
2723 0 : rb_bufsz = order_base_2(ring->ring_size / 8);
2724 0 : tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2725 : #ifdef __BIG_ENDIAN
2726 : tmp |= BUF_SWAP_32BIT;
2727 : #endif
2728 0 : WREG32(CP_RB_CNTL, tmp);
2729 0 : WREG32(CP_SEM_WAIT_TIMER, 0x0);
2730 :
2731 : /* Set the write pointer delay */
2732 0 : WREG32(CP_RB_WPTR_DELAY, 0);
2733 :
2734 : /* Initialize the ring buffer's read and write pointers */
2735 0 : WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2736 0 : WREG32(CP_RB_RPTR_WR, 0);
2737 0 : ring->wptr = 0;
2738 0 : WREG32(CP_RB_WPTR, ring->wptr);
2739 :
2740 : /* set the wb address whether it's enabled or not */
2741 0 : WREG32(CP_RB_RPTR_ADDR,
2742 : ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2743 0 : WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2744 0 : WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2745 :
2746 0 : if (rdev->wb.enabled)
2747 0 : WREG32(SCRATCH_UMSK, 0xff);
2748 : else {
2749 0 : tmp |= RB_NO_UPDATE;
2750 0 : WREG32(SCRATCH_UMSK, 0);
2751 : }
2752 :
2753 0 : mdelay(1);
2754 0 : WREG32(CP_RB_CNTL, tmp);
2755 :
2756 0 : WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2757 0 : WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2758 :
2759 0 : r600_cp_start(rdev);
2760 0 : ring->ready = true;
2761 0 : r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2762 0 : if (r) {
2763 0 : ring->ready = false;
2764 0 : return r;
2765 : }
2766 :
2767 0 : if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2768 0 : radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2769 :
2770 0 : return 0;
2771 0 : }
2772 :
2773 0 : void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2774 : {
2775 : u32 rb_bufsz;
2776 : int r;
2777 :
2778 : /* Align ring size */
2779 0 : rb_bufsz = order_base_2(ring_size / 8);
2780 0 : ring_size = (1 << (rb_bufsz + 1)) * 4;
2781 0 : ring->ring_size = ring_size;
2782 0 : ring->align_mask = 16 - 1;
2783 :
2784 0 : if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2785 0 : r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2786 0 : if (r) {
2787 0 : DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2788 0 : ring->rptr_save_reg = 0;
2789 0 : }
2790 : }
2791 0 : }
2792 :
2793 0 : void r600_cp_fini(struct radeon_device *rdev)
2794 : {
2795 0 : struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2796 0 : r600_cp_stop(rdev);
2797 0 : radeon_ring_fini(rdev, ring);
2798 0 : radeon_scratch_free(rdev, ring->rptr_save_reg);
2799 0 : }
2800 :
2801 : /*
2802 : * GPU scratch register helper functions.
2803 : */
2804 0 : void r600_scratch_init(struct radeon_device *rdev)
2805 : {
2806 : int i;
2807 :
2808 0 : rdev->scratch.num_reg = 7;
2809 0 : rdev->scratch.reg_base = SCRATCH_REG0;
2810 0 : for (i = 0; i < rdev->scratch.num_reg; i++) {
2811 0 : rdev->scratch.free[i] = true;
2812 0 : rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2813 : }
2814 0 : }
2815 :
2816 0 : int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2817 : {
2818 0 : uint32_t scratch;
2819 : uint32_t tmp = 0;
2820 : unsigned i;
2821 : int r;
2822 :
2823 0 : r = radeon_scratch_get(rdev, &scratch);
2824 0 : if (r) {
2825 0 : DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2826 0 : return r;
2827 : }
2828 0 : WREG32(scratch, 0xCAFEDEAD);
2829 0 : r = radeon_ring_lock(rdev, ring, 3);
2830 0 : if (r) {
2831 0 : DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2832 0 : radeon_scratch_free(rdev, scratch);
2833 0 : return r;
2834 : }
2835 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2836 0 : radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2837 0 : radeon_ring_write(ring, 0xDEADBEEF);
2838 0 : radeon_ring_unlock_commit(rdev, ring, false);
2839 0 : for (i = 0; i < rdev->usec_timeout; i++) {
2840 0 : tmp = RREG32(scratch);
2841 0 : if (tmp == 0xDEADBEEF)
2842 : break;
2843 0 : DRM_UDELAY(1);
2844 : }
2845 0 : if (i < rdev->usec_timeout) {
2846 : DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2847 : } else {
2848 0 : DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2849 : ring->idx, scratch, tmp);
2850 : r = -EINVAL;
2851 : }
2852 0 : radeon_scratch_free(rdev, scratch);
2853 0 : return r;
2854 0 : }
2855 :
2856 : /*
2857 : * CP fences/semaphores
2858 : */
2859 :
2860 0 : void r600_fence_ring_emit(struct radeon_device *rdev,
2861 : struct radeon_fence *fence)
2862 : {
2863 0 : struct radeon_ring *ring = &rdev->ring[fence->ring];
2864 : u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2865 : PACKET3_SH_ACTION_ENA;
2866 :
2867 0 : if (rdev->family >= CHIP_RV770)
2868 0 : cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2869 :
2870 0 : if (rdev->wb.use_event) {
2871 0 : u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2872 : /* flush read cache over gart */
2873 0 : radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2874 0 : radeon_ring_write(ring, cp_coher_cntl);
2875 0 : radeon_ring_write(ring, 0xFFFFFFFF);
2876 0 : radeon_ring_write(ring, 0);
2877 0 : radeon_ring_write(ring, 10); /* poll interval */
2878 : /* EVENT_WRITE_EOP - flush caches, send int */
2879 0 : radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2880 0 : radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2881 0 : radeon_ring_write(ring, lower_32_bits(addr));
2882 0 : radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2883 0 : radeon_ring_write(ring, fence->seq);
2884 0 : radeon_ring_write(ring, 0);
2885 0 : } else {
2886 : /* flush read cache over gart */
2887 0 : radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2888 0 : radeon_ring_write(ring, cp_coher_cntl);
2889 0 : radeon_ring_write(ring, 0xFFFFFFFF);
2890 0 : radeon_ring_write(ring, 0);
2891 0 : radeon_ring_write(ring, 10); /* poll interval */
2892 0 : radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2893 0 : radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2894 : /* wait for 3D idle clean */
2895 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2896 0 : radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2897 0 : radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2898 : /* Emit fence sequence & fire IRQ */
2899 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2900 0 : radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2901 0 : radeon_ring_write(ring, fence->seq);
2902 : /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2903 0 : radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2904 0 : radeon_ring_write(ring, RB_INT_STAT);
2905 : }
2906 0 : }
2907 :
2908 : /**
2909 : * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2910 : *
2911 : * @rdev: radeon_device pointer
2912 : * @ring: radeon ring buffer object
2913 : * @semaphore: radeon semaphore object
2914 : * @emit_wait: Is this a semaphore wait?
2915 : *
2916 : * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2917 : * from running ahead of semaphore waits.
2918 : */
2919 0 : bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2920 : struct radeon_ring *ring,
2921 : struct radeon_semaphore *semaphore,
2922 : bool emit_wait)
2923 : {
2924 0 : uint64_t addr = semaphore->gpu_addr;
2925 0 : unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2926 :
2927 0 : if (rdev->family < CHIP_CAYMAN)
2928 0 : sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2929 :
2930 0 : radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2931 0 : radeon_ring_write(ring, lower_32_bits(addr));
2932 0 : radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2933 :
2934 : /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2935 0 : if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
2936 : /* Prevent the PFP from running ahead of the semaphore wait */
2937 0 : radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2938 0 : radeon_ring_write(ring, 0x0);
2939 0 : }
2940 :
2941 0 : return true;
2942 : }
2943 :
2944 : /**
2945 : * r600_copy_cpdma - copy pages using the CP DMA engine
2946 : *
2947 : * @rdev: radeon_device pointer
2948 : * @src_offset: src GPU address
2949 : * @dst_offset: dst GPU address
2950 : * @num_gpu_pages: number of GPU pages to xfer
2951 : * @resv: reservation object to sync to
2952 : *
2953 : * Copy GPU pages using the CP DMA engine (r6xx+).
2954 : * Used by the radeon ttm implementation to move pages if
2955 : * registered as the asic copy callback.
2956 : */
2957 0 : struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2958 : uint64_t src_offset, uint64_t dst_offset,
2959 : unsigned num_gpu_pages,
2960 : struct reservation_object *resv)
2961 : {
2962 0 : struct radeon_fence *fence;
2963 0 : struct radeon_sync sync;
2964 0 : int ring_index = rdev->asic->copy.blit_ring_index;
2965 0 : struct radeon_ring *ring = &rdev->ring[ring_index];
2966 : u32 size_in_bytes, cur_size_in_bytes, tmp;
2967 : int i, num_loops;
2968 : int r = 0;
2969 :
2970 0 : radeon_sync_create(&sync);
2971 :
2972 0 : size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2973 0 : num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2974 0 : r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2975 0 : if (r) {
2976 0 : DRM_ERROR("radeon: moving bo (%d).\n", r);
2977 0 : radeon_sync_free(rdev, &sync, NULL);
2978 0 : return ERR_PTR(r);
2979 : }
2980 :
2981 0 : radeon_sync_resv(rdev, &sync, resv, false);
2982 0 : radeon_sync_rings(rdev, &sync, ring->idx);
2983 :
2984 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2985 0 : radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2986 0 : radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2987 0 : for (i = 0; i < num_loops; i++) {
2988 : cur_size_in_bytes = size_in_bytes;
2989 0 : if (cur_size_in_bytes > 0x1fffff)
2990 : cur_size_in_bytes = 0x1fffff;
2991 0 : size_in_bytes -= cur_size_in_bytes;
2992 0 : tmp = upper_32_bits(src_offset) & 0xff;
2993 0 : if (size_in_bytes == 0)
2994 0 : tmp |= PACKET3_CP_DMA_CP_SYNC;
2995 0 : radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2996 0 : radeon_ring_write(ring, lower_32_bits(src_offset));
2997 0 : radeon_ring_write(ring, tmp);
2998 0 : radeon_ring_write(ring, lower_32_bits(dst_offset));
2999 0 : radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3000 0 : radeon_ring_write(ring, cur_size_in_bytes);
3001 0 : src_offset += cur_size_in_bytes;
3002 0 : dst_offset += cur_size_in_bytes;
3003 : }
3004 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3005 0 : radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3006 0 : radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3007 :
3008 0 : r = radeon_fence_emit(rdev, &fence, ring->idx);
3009 0 : if (r) {
3010 0 : radeon_ring_unlock_undo(rdev, ring);
3011 0 : radeon_sync_free(rdev, &sync, NULL);
3012 0 : return ERR_PTR(r);
3013 : }
3014 :
3015 0 : radeon_ring_unlock_commit(rdev, ring, false);
3016 0 : radeon_sync_free(rdev, &sync, fence);
3017 :
3018 0 : return fence;
3019 0 : }
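/*
 * Illustrative sketch, not driver code: the chunking done in r600_copy_cpdma()
 * above. A single CP_DMA packet copies at most 0x1fffff bytes, so the copy is
 * split into that many loops, and PACKET3_CP_DMA_CP_SYNC is set only on the
 * final chunk. The helper name is hypothetical.
 */
#if 0
static unsigned example_cpdma_num_loops(u64 size_in_bytes)
{
	/* equivalent to DIV_ROUND_UP(size_in_bytes, 0x1fffff) */
	return (size_in_bytes + 0x1fffff - 1) / 0x1fffff;
}
#endif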
3020 :
3021 0 : int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3022 : uint32_t tiling_flags, uint32_t pitch,
3023 : uint32_t offset, uint32_t obj_size)
3024 : {
3025 : /* FIXME: implement */
3026 0 : return 0;
3027 : }
3028 :
3029 0 : void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
3030 : {
3031 : /* FIXME: implement */
3032 0 : }
3033 :
3034 0 : static int r600_startup(struct radeon_device *rdev)
3035 : {
3036 : struct radeon_ring *ring;
3037 : int r;
3038 :
3039 : /* enable pcie gen2 link */
3040 0 : r600_pcie_gen2_enable(rdev);
3041 :
3042 : /* scratch needs to be initialized before MC */
3043 0 : r = r600_vram_scratch_init(rdev);
3044 0 : if (r)
3045 0 : return r;
3046 :
3047 0 : r600_mc_program(rdev);
3048 :
3049 0 : if (rdev->flags & RADEON_IS_AGP) {
3050 0 : r600_agp_enable(rdev);
3051 0 : } else {
3052 0 : r = r600_pcie_gart_enable(rdev);
3053 0 : if (r)
3054 0 : return r;
3055 : }
3056 0 : r600_gpu_init(rdev);
3057 :
3058 : /* allocate wb buffer */
3059 0 : r = radeon_wb_init(rdev);
3060 0 : if (r)
3061 0 : return r;
3062 :
3063 0 : r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3064 0 : if (r) {
3065 0 : dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3066 0 : return r;
3067 : }
3068 :
3069 0 : if (rdev->has_uvd) {
3070 0 : r = uvd_v1_0_resume(rdev);
3071 0 : if (!r) {
3072 0 : r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
3073 0 : if (r) {
3074 0 : dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
3075 0 : }
3076 : }
3077 0 : if (r)
3078 0 : rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3079 : }
3080 :
3081 : /* Enable IRQ */
3082 0 : if (!rdev->irq.installed) {
3083 0 : r = radeon_irq_kms_init(rdev);
3084 0 : if (r)
3085 0 : return r;
3086 : }
3087 :
3088 0 : r = r600_irq_init(rdev);
3089 0 : if (r) {
3090 0 : DRM_ERROR("radeon: IH init failed (%d).\n", r);
3091 0 : radeon_irq_kms_fini(rdev);
3092 0 : return r;
3093 : }
3094 0 : r600_irq_set(rdev);
3095 :
3096 0 : ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3097 0 : r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3098 : RADEON_CP_PACKET2);
3099 0 : if (r)
3100 0 : return r;
3101 :
3102 0 : r = r600_cp_load_microcode(rdev);
3103 0 : if (r)
3104 0 : return r;
3105 0 : r = r600_cp_resume(rdev);
3106 0 : if (r)
3107 0 : return r;
3108 :
3109 0 : if (rdev->has_uvd) {
3110 0 : ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3111 0 : if (ring->ring_size) {
3112 0 : r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
3113 : RADEON_CP_PACKET2);
3114 0 : if (!r)
3115 0 : r = uvd_v1_0_init(rdev);
3116 0 : if (r)
3117 0 : DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
3118 : }
3119 : }
3120 :
3121 0 : r = radeon_ib_pool_init(rdev);
3122 0 : if (r) {
3123 0 : dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3124 0 : return r;
3125 : }
3126 :
3127 0 : r = radeon_audio_init(rdev);
3128 0 : if (r) {
3129 0 : DRM_ERROR("radeon: audio init failed\n");
3130 0 : return r;
3131 : }
3132 :
3133 0 : return 0;
3134 0 : }
3135 :
3136 0 : void r600_vga_set_state(struct radeon_device *rdev, bool state)
3137 : {
3138 : uint32_t temp;
3139 :
3140 0 : temp = RREG32(CONFIG_CNTL);
3141 0 : 	if (!state) {
3142 0 : temp &= ~(1<<0);
3143 0 : temp |= (1<<1);
3144 0 : } else {
3145 0 : temp &= ~(1<<1);
3146 : }
3147 0 : WREG32(CONFIG_CNTL, temp);
3148 0 : }
3149 :
3150 0 : int r600_resume(struct radeon_device *rdev)
3151 : {
3152 : int r;
3153 :
3154 : 	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
3155 : 	 * posting will perform the necessary tasks to bring the GPU back
3156 : 	 * into good shape.
3157 : */
3158 : /* post card */
3159 0 : atom_asic_init(rdev->mode_info.atom_context);
3160 :
3161 0 : if (rdev->pm.pm_method == PM_METHOD_DPM)
3162 0 : radeon_pm_resume(rdev);
3163 :
3164 0 : rdev->accel_working = true;
3165 0 : r = r600_startup(rdev);
3166 0 : if (r) {
3167 0 : DRM_ERROR("r600 startup failed on resume\n");
3168 0 : rdev->accel_working = false;
3169 0 : return r;
3170 : }
3171 :
3172 0 : return r;
3173 0 : }
3174 :
3175 0 : int r600_suspend(struct radeon_device *rdev)
3176 : {
3177 0 : radeon_pm_suspend(rdev);
3178 0 : radeon_audio_fini(rdev);
3179 0 : r600_cp_stop(rdev);
3180 0 : if (rdev->has_uvd) {
3181 0 : uvd_v1_0_fini(rdev);
3182 0 : radeon_uvd_suspend(rdev);
3183 0 : }
3184 0 : r600_irq_suspend(rdev);
3185 0 : radeon_wb_disable(rdev);
3186 0 : r600_pcie_gart_disable(rdev);
3187 :
3188 0 : return 0;
3189 : }
3190 :
3191 : /* The plan is to move initialization into this function and to use
3192 : * helper functions so that radeon_device_init does little more
3193 : * than call ASIC-specific functions. This should also allow
3194 : * the removal of a bunch of callbacks
3195 : * like vram_info.
3196 : */
3197 0 : int r600_init(struct radeon_device *rdev)
3198 : {
3199 : int r;
3200 :
3201 0 : if (r600_debugfs_mc_info_init(rdev)) {
3202 0 : DRM_ERROR("Failed to register debugfs file for mc!\n");
3203 0 : }
3204 : /* Read BIOS */
3205 0 : if (!radeon_get_bios(rdev)) {
3206 0 : if (ASIC_IS_AVIVO(rdev))
3207 0 : return -EINVAL;
3208 : }
3209 : /* Must be an ATOMBIOS */
3210 0 : if (!rdev->is_atom_bios) {
3211 0 : dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3212 0 : return -EINVAL;
3213 : }
3214 0 : r = radeon_atombios_init(rdev);
3215 0 : if (r)
3216 0 : return r;
3217 : /* Post card if necessary */
3218 0 : if (!radeon_card_posted(rdev)) {
3219 0 : if (!rdev->bios) {
3220 0 : dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3221 0 : return -EINVAL;
3222 : }
3223 : DRM_INFO("GPU not posted. Posting now...\n");
3224 0 : atom_asic_init(rdev->mode_info.atom_context);
3225 0 : }
3226 : /* Initialize scratch registers */
3227 0 : r600_scratch_init(rdev);
3228 : /* Initialize surface registers */
3229 0 : radeon_surface_init(rdev);
3230 : /* Initialize clocks */
3231 0 : radeon_get_clock_info(rdev->ddev);
3232 : /* Fence driver */
3233 0 : r = radeon_fence_driver_init(rdev);
3234 0 : if (r)
3235 0 : return r;
3236 0 : if (rdev->flags & RADEON_IS_AGP) {
3237 0 : r = radeon_agp_init(rdev);
3238 0 : if (r)
3239 0 : radeon_agp_disable(rdev);
3240 : }
3241 0 : r = r600_mc_init(rdev);
3242 0 : if (r)
3243 0 : return r;
3244 : /* Memory manager */
3245 0 : r = radeon_bo_init(rdev);
3246 0 : if (r)
3247 0 : return r;
3248 :
3249 0 : if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3250 0 : r = r600_init_microcode(rdev);
3251 0 : if (r) {
3252 0 : DRM_ERROR("Failed to load firmware!\n");
3253 0 : return r;
3254 : }
3255 : }
3256 :
3257 : /* Initialize power management */
3258 0 : radeon_pm_init(rdev);
3259 :
3260 0 : rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3261 0 : r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3262 :
3263 0 : if (rdev->has_uvd) {
3264 0 : r = radeon_uvd_init(rdev);
3265 0 : if (!r) {
3266 0 : rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
3267 0 : r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
3268 0 : }
3269 : }
3270 :
3271 0 : rdev->ih.ring_obj = NULL;
3272 0 : r600_ih_ring_init(rdev, 64 * 1024);
3273 :
3274 0 : r = r600_pcie_gart_init(rdev);
3275 0 : if (r)
3276 0 : return r;
3277 :
3278 0 : rdev->accel_working = true;
3279 0 : r = r600_startup(rdev);
3280 0 : if (r) {
3281 0 : dev_err(rdev->dev, "disabling GPU acceleration\n");
3282 0 : r600_cp_fini(rdev);
3283 0 : r600_irq_fini(rdev);
3284 0 : radeon_wb_fini(rdev);
3285 0 : radeon_ib_pool_fini(rdev);
3286 0 : radeon_irq_kms_fini(rdev);
3287 0 : r600_pcie_gart_fini(rdev);
3288 0 : rdev->accel_working = false;
3289 0 : }
3290 :
3291 0 : return 0;
3292 0 : }
3293 :
3294 0 : void r600_fini(struct radeon_device *rdev)
3295 : {
3296 0 : radeon_pm_fini(rdev);
3297 0 : radeon_audio_fini(rdev);
3298 0 : r600_cp_fini(rdev);
3299 0 : r600_irq_fini(rdev);
3300 0 : if (rdev->has_uvd) {
3301 0 : uvd_v1_0_fini(rdev);
3302 0 : radeon_uvd_fini(rdev);
3303 0 : }
3304 0 : radeon_wb_fini(rdev);
3305 0 : radeon_ib_pool_fini(rdev);
3306 0 : radeon_irq_kms_fini(rdev);
3307 0 : r600_pcie_gart_fini(rdev);
3308 0 : r600_vram_scratch_fini(rdev);
3309 0 : radeon_agp_fini(rdev);
3310 0 : radeon_gem_fini(rdev);
3311 0 : radeon_fence_driver_fini(rdev);
3312 0 : radeon_bo_fini(rdev);
3313 0 : radeon_atombios_fini(rdev);
3314 0 : kfree(rdev->bios);
3315 0 : rdev->bios = NULL;
3316 0 : }
3317 :
3318 :
3319 : /*
3320 : * CS stuff
3321 : */
3322 0 : void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3323 : {
3324 0 : struct radeon_ring *ring = &rdev->ring[ib->ring];
3325 : u32 next_rptr;
3326 :
3327 0 : if (ring->rptr_save_reg) {
3328 0 : next_rptr = ring->wptr + 3 + 4;
3329 0 : radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3330 0 : radeon_ring_write(ring, ((ring->rptr_save_reg -
3331 0 : PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3332 0 : radeon_ring_write(ring, next_rptr);
3333 0 : } else if (rdev->wb.enabled) {
3334 0 : next_rptr = ring->wptr + 5 + 4;
3335 0 : radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3336 0 : radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3337 0 : radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3338 0 : radeon_ring_write(ring, next_rptr);
3339 0 : radeon_ring_write(ring, 0);
3340 0 : }
3341 :
3342 0 : radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3343 0 : radeon_ring_write(ring,
3344 : #ifdef __BIG_ENDIAN
3345 : (2 << 0) |
3346 : #endif
3347 0 : (ib->gpu_addr & 0xFFFFFFFC));
3348 0 : radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3349 0 : radeon_ring_write(ring, ib->length_dw);
3350 0 : }
3351 :
3352 0 : int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3353 : {
3354 0 : struct radeon_ib ib;
3355 0 : uint32_t scratch;
3356 : uint32_t tmp = 0;
3357 : unsigned i;
3358 : int r;
3359 :
3360 0 : r = radeon_scratch_get(rdev, &scratch);
3361 0 : if (r) {
3362 0 : DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3363 0 : return r;
3364 : }
3365 0 : WREG32(scratch, 0xCAFEDEAD);
3366 0 : r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3367 0 : if (r) {
3368 0 : DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3369 0 : goto free_scratch;
3370 : }
3371 0 : ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3372 0 : ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3373 0 : ib.ptr[2] = 0xDEADBEEF;
3374 0 : ib.length_dw = 3;
3375 0 : r = radeon_ib_schedule(rdev, &ib, NULL, false);
3376 0 : if (r) {
3377 0 : DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3378 0 : goto free_ib;
3379 : }
3380 0 : r = radeon_fence_wait(ib.fence, false);
3381 0 : if (r) {
3382 0 : DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3383 0 : goto free_ib;
3384 : }
3385 0 : for (i = 0; i < rdev->usec_timeout; i++) {
3386 0 : tmp = RREG32(scratch);
3387 0 : if (tmp == 0xDEADBEEF)
3388 : break;
3389 0 : DRM_UDELAY(1);
3390 : }
3391 0 : if (i < rdev->usec_timeout) {
3392 : DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3393 : } else {
3394 0 : DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3395 : scratch, tmp);
3396 : r = -EINVAL;
3397 : }
3398 : free_ib:
3399 0 : radeon_ib_free(rdev, &ib);
3400 : free_scratch:
3401 0 : radeon_scratch_free(rdev, scratch);
3402 0 : return r;
3403 0 : }
3404 :
3405 : /*
3406 : * Interrupts
3407 : *
3408 : * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
3409 : * the same as the CP ring buffer, but in reverse. Rather than the CPU
3410 : * writing to the ring and the GPU consuming, the GPU writes to the ring
3411 : * and host consumes. As the host irq handler processes interrupts, it
3412 : * increments the rptr. When the rptr catches up with the wptr, all the
3413 : * current interrupts have been processed.
3414 : */
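     :
     : /* Editorial sketch (not driver code): a minimal consumer loop for an
     :  * IH-style ring, assuming r6xx's 16-byte vectors and a power-of-two
     :  * ring so that ptr_mask wraps byte offsets; the real loop in
     :  * r600_irq_process() below adds vector decoding and overflow handling.
     :  */
     : #if 0
     : static void ih_consume_sketch(struct radeon_device *rdev, u32 wptr)
     : {
     : 	u32 rptr = rdev->ih.rptr;
     :
     : 	while (rptr != wptr) {
     : 		/* decode the 128-bit vector at byte offset rptr here */
     : 		rptr = (rptr + 16) & rdev->ih.ptr_mask;
     : 	}
     : 	/* cache the new rptr; the real code also writes IH_RB_RPTR */
     : 	rdev->ih.rptr = rptr;
     : }
     : #endif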
3415 :
3416 0 : void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3417 : {
3418 : u32 rb_bufsz;
3419 :
3420 : /* Align ring size */
3421 0 : rb_bufsz = order_base_2(ring_size / 4);
3422 0 : ring_size = (1 << rb_bufsz) * 4;
3423 0 : rdev->ih.ring_size = ring_size;
3424 0 : rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3425 0 : rdev->ih.rptr = 0;
3426 0 : }
3427 :
3428 0 : int r600_ih_ring_alloc(struct radeon_device *rdev)
3429 : {
3430 : int r;
3431 :
3432 : /* Allocate ring buffer */
3433 0 : if (rdev->ih.ring_obj == NULL) {
3434 0 : r = radeon_bo_create(rdev, rdev->ih.ring_size,
3435 : PAGE_SIZE, true,
3436 : RADEON_GEM_DOMAIN_GTT, 0,
3437 : NULL, NULL, &rdev->ih.ring_obj);
3438 0 : if (r) {
3439 0 : DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3440 0 : return r;
3441 : }
3442 0 : r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3443 0 : if (unlikely(r != 0))
3444 0 : return r;
3445 0 : r = radeon_bo_pin(rdev->ih.ring_obj,
3446 : RADEON_GEM_DOMAIN_GTT,
3447 0 : &rdev->ih.gpu_addr);
3448 0 : if (r) {
3449 0 : radeon_bo_unreserve(rdev->ih.ring_obj);
3450 0 : DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3451 0 : return r;
3452 : }
3453 0 : r = radeon_bo_kmap(rdev->ih.ring_obj,
3454 0 : (void **)&rdev->ih.ring);
3455 0 : radeon_bo_unreserve(rdev->ih.ring_obj);
3456 0 : if (r) {
3457 0 : DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3458 0 : return r;
3459 : }
3460 : }
3461 0 : return 0;
3462 0 : }
3463 :
3464 0 : void r600_ih_ring_fini(struct radeon_device *rdev)
3465 : {
3466 : int r;
3467 0 : if (rdev->ih.ring_obj) {
3468 0 : r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3469 0 : if (likely(r == 0)) {
3470 0 : radeon_bo_kunmap(rdev->ih.ring_obj);
3471 0 : radeon_bo_unpin(rdev->ih.ring_obj);
3472 0 : radeon_bo_unreserve(rdev->ih.ring_obj);
3473 0 : }
3474 0 : radeon_bo_unref(&rdev->ih.ring_obj);
3475 0 : rdev->ih.ring = NULL;
3476 0 : rdev->ih.ring_obj = NULL;
3477 0 : }
3478 0 : }
3479 :
3480 0 : void r600_rlc_stop(struct radeon_device *rdev)
3481 : {
3482 :
3483 0 : if ((rdev->family >= CHIP_RV770) &&
3484 0 : (rdev->family <= CHIP_RV740)) {
3485 : /* r7xx asics need to soft reset RLC before halting */
3486 0 : WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3487 0 : RREG32(SRBM_SOFT_RESET);
3488 0 : mdelay(15);
3489 0 : WREG32(SRBM_SOFT_RESET, 0);
3490 0 : RREG32(SRBM_SOFT_RESET);
3491 0 : }
3492 :
3493 0 : WREG32(RLC_CNTL, 0);
3494 0 : }
3495 :
3496 0 : static void r600_rlc_start(struct radeon_device *rdev)
3497 : {
3498 0 : WREG32(RLC_CNTL, RLC_ENABLE);
3499 0 : }
3500 :
3501 0 : static int r600_rlc_resume(struct radeon_device *rdev)
3502 : {
3503 : u32 i;
3504 : const __be32 *fw_data;
3505 :
3506 0 : if (!rdev->rlc_fw)
3507 0 : return -EINVAL;
3508 :
3509 0 : r600_rlc_stop(rdev);
3510 :
3511 0 : WREG32(RLC_HB_CNTL, 0);
3512 :
3513 0 : WREG32(RLC_HB_BASE, 0);
3514 0 : WREG32(RLC_HB_RPTR, 0);
3515 0 : WREG32(RLC_HB_WPTR, 0);
3516 0 : WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3517 0 : WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3518 0 : WREG32(RLC_MC_CNTL, 0);
3519 0 : WREG32(RLC_UCODE_CNTL, 0);
3520 :
3521 0 : fw_data = (const __be32 *)rdev->rlc_fw->data;
3522 0 : if (rdev->family >= CHIP_RV770) {
3523 0 : for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3524 0 : WREG32(RLC_UCODE_ADDR, i);
3525 0 : WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3526 : }
3527 : } else {
3528 0 : for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3529 0 : WREG32(RLC_UCODE_ADDR, i);
3530 0 : WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3531 : }
3532 : }
3533 0 : WREG32(RLC_UCODE_ADDR, 0);
3534 :
3535 0 : r600_rlc_start(rdev);
3536 :
3537 0 : return 0;
3538 0 : }
3539 :
3540 0 : static void r600_enable_interrupts(struct radeon_device *rdev)
3541 : {
3542 0 : u32 ih_cntl = RREG32(IH_CNTL);
3543 0 : u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3544 :
3545 0 : ih_cntl |= ENABLE_INTR;
3546 0 : ih_rb_cntl |= IH_RB_ENABLE;
3547 0 : WREG32(IH_CNTL, ih_cntl);
3548 0 : WREG32(IH_RB_CNTL, ih_rb_cntl);
3549 0 : rdev->ih.enabled = true;
3550 0 : }
3551 :
3552 0 : void r600_disable_interrupts(struct radeon_device *rdev)
3553 : {
3554 0 : u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3555 0 : u32 ih_cntl = RREG32(IH_CNTL);
3556 :
3557 0 : ih_rb_cntl &= ~IH_RB_ENABLE;
3558 0 : ih_cntl &= ~ENABLE_INTR;
3559 0 : WREG32(IH_RB_CNTL, ih_rb_cntl);
3560 0 : WREG32(IH_CNTL, ih_cntl);
3561 : /* set rptr, wptr to 0 */
3562 0 : WREG32(IH_RB_RPTR, 0);
3563 0 : WREG32(IH_RB_WPTR, 0);
3564 0 : rdev->ih.enabled = false;
3565 0 : rdev->ih.rptr = 0;
3566 0 : }
3567 :
3568 0 : static void r600_disable_interrupt_state(struct radeon_device *rdev)
3569 : {
3570 : u32 tmp;
3571 :
3572 0 : WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3573 0 : tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3574 0 : WREG32(DMA_CNTL, tmp);
3575 0 : WREG32(GRBM_INT_CNTL, 0);
3576 0 : WREG32(DxMODE_INT_MASK, 0);
3577 0 : WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3578 0 : WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3579 0 : if (ASIC_IS_DCE3(rdev)) {
3580 0 : WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3581 0 : WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3582 0 : tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3583 0 : WREG32(DC_HPD1_INT_CONTROL, tmp);
3584 0 : tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3585 0 : WREG32(DC_HPD2_INT_CONTROL, tmp);
3586 0 : tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3587 0 : WREG32(DC_HPD3_INT_CONTROL, tmp);
3588 0 : tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3589 0 : WREG32(DC_HPD4_INT_CONTROL, tmp);
3590 0 : if (ASIC_IS_DCE32(rdev)) {
3591 0 : tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3592 0 : WREG32(DC_HPD5_INT_CONTROL, tmp);
3593 0 : tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3594 0 : WREG32(DC_HPD6_INT_CONTROL, tmp);
3595 0 : tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3596 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3597 0 : tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3598 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3599 0 : } else {
3600 0 : tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3601 0 : WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3602 0 : tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3603 0 : WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3604 : }
3605 : } else {
3606 0 : WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3607 0 : WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3608 0 : tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3609 0 : WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3610 0 : tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3611 0 : WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3612 0 : tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3613 0 : WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3614 0 : tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3615 0 : WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3616 0 : tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3617 0 : WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3618 : }
3619 0 : }
3620 :
3621 0 : int r600_irq_init(struct radeon_device *rdev)
3622 : {
3623 : int ret = 0;
3624 : int rb_bufsz;
3625 : u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3626 :
3627 : /* allocate ring */
3628 0 : ret = r600_ih_ring_alloc(rdev);
3629 0 : if (ret)
3630 0 : return ret;
3631 :
3632 : /* disable irqs */
3633 0 : r600_disable_interrupts(rdev);
3634 :
3635 : /* init rlc */
3636 0 : if (rdev->family >= CHIP_CEDAR)
3637 0 : ret = evergreen_rlc_resume(rdev);
3638 : else
3639 0 : ret = r600_rlc_resume(rdev);
3640 0 : if (ret) {
3641 0 : r600_ih_ring_fini(rdev);
3642 0 : return ret;
3643 : }
3644 :
3645 : /* setup interrupt control */
3646 : /* set dummy read address to ring address */
3647 0 : WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3648 0 : interrupt_cntl = RREG32(INTERRUPT_CNTL);
3649 : /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3650 : * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3651 : */
3652 0 : interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3653 : /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3654 0 : interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3655 0 : WREG32(INTERRUPT_CNTL, interrupt_cntl);
3656 :
3657 0 : WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3658 0 : rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3659 :
3660 : ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3661 0 : IH_WPTR_OVERFLOW_CLEAR |
3662 0 : (rb_bufsz << 1));
3663 :
3664 0 : if (rdev->wb.enabled)
3665 0 : ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3666 :
3667 : /* set the writeback address whether it's enabled or not */
3668 0 : WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3669 0 : WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3670 :
3671 0 : WREG32(IH_RB_CNTL, ih_rb_cntl);
3672 :
3673 : /* set rptr, wptr to 0 */
3674 0 : WREG32(IH_RB_RPTR, 0);
3675 0 : WREG32(IH_RB_WPTR, 0);
3676 :
3677 : /* Default settings for IH_CNTL (disabled at first) */
3678 : ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3679 : /* RPTR_REARM only works if MSIs are enabled */
3680 0 : if (rdev->msi_enabled)
3681 0 : ih_cntl |= RPTR_REARM;
3682 0 : WREG32(IH_CNTL, ih_cntl);
3683 :
3684 : /* force the active interrupt state to all disabled */
3685 0 : if (rdev->family >= CHIP_CEDAR)
3686 0 : evergreen_disable_interrupt_state(rdev);
3687 : else
3688 0 : r600_disable_interrupt_state(rdev);
3689 :
3690 : /* at this point everything should be set up correctly to enable master */
3691 : pci_set_master(rdev->pdev);
3692 :
3693 : /* enable irqs */
3694 0 : r600_enable_interrupts(rdev);
3695 :
3696 0 : return ret;
3697 0 : }
3698 :
3699 0 : void r600_irq_suspend(struct radeon_device *rdev)
3700 : {
3701 0 : r600_irq_disable(rdev);
3702 0 : r600_rlc_stop(rdev);
3703 0 : }
3704 :
3705 0 : void r600_irq_fini(struct radeon_device *rdev)
3706 : {
3707 0 : r600_irq_suspend(rdev);
3708 0 : r600_ih_ring_fini(rdev);
3709 0 : }
3710 :
3711 0 : int r600_irq_set(struct radeon_device *rdev)
3712 : {
3713 : u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3714 : u32 mode_int = 0;
3715 : u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3716 : u32 grbm_int_cntl = 0;
3717 : u32 hdmi0, hdmi1;
3718 : u32 dma_cntl;
3719 : u32 thermal_int = 0;
3720 :
3721 0 : if (!rdev->irq.installed) {
3722 0 : WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3723 0 : return -EINVAL;
3724 : }
3725 : /* don't enable anything if the ih is disabled */
3726 0 : if (!rdev->ih.enabled) {
3727 0 : r600_disable_interrupts(rdev);
3728 : /* force the active interrupt state to all disabled */
3729 0 : r600_disable_interrupt_state(rdev);
3730 0 : return 0;
3731 : }
3732 :
3733 0 : if (ASIC_IS_DCE3(rdev)) {
3734 0 : hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3735 0 : hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3736 0 : hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3737 0 : hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3738 0 : if (ASIC_IS_DCE32(rdev)) {
3739 0 : hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3740 0 : hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3741 0 : hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3742 0 : hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3743 0 : } else {
3744 0 : hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3745 0 : hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3746 : }
3747 : } else {
3748 0 : hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3749 0 : hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3750 0 : hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3751 0 : hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3752 0 : hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3753 : }
3754 :
3755 0 : dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3756 :
3757 0 : if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3758 0 : thermal_int = RREG32(CG_THERMAL_INT) &
3759 : ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3760 0 : } else if (rdev->family >= CHIP_RV770) {
3761 0 : thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3762 : ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3763 0 : }
3764 0 : if (rdev->irq.dpm_thermal) {
3765 : DRM_DEBUG("dpm thermal\n");
3766 0 : thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3767 0 : }
3768 :
3769 0 : if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3770 : DRM_DEBUG("r600_irq_set: sw int\n");
3771 : cp_int_cntl |= RB_INT_ENABLE;
3772 : cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3773 0 : }
3774 :
3775 0 : if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3776 : DRM_DEBUG("r600_irq_set: sw int dma\n");
3777 0 : dma_cntl |= TRAP_ENABLE;
3778 0 : }
3779 :
3780 0 : if (rdev->irq.crtc_vblank_int[0] ||
3781 0 : atomic_read(&rdev->irq.pflip[0])) {
3782 : DRM_DEBUG("r600_irq_set: vblank 0\n");
3783 : mode_int |= D1MODE_VBLANK_INT_MASK;
3784 0 : }
3785 0 : if (rdev->irq.crtc_vblank_int[1] ||
3786 0 : atomic_read(&rdev->irq.pflip[1])) {
3787 : DRM_DEBUG("r600_irq_set: vblank 1\n");
3788 0 : mode_int |= D2MODE_VBLANK_INT_MASK;
3789 0 : }
3790 0 : if (rdev->irq.hpd[0]) {
3791 : DRM_DEBUG("r600_irq_set: hpd 1\n");
3792 0 : hpd1 |= DC_HPDx_INT_EN;
3793 0 : }
3794 0 : if (rdev->irq.hpd[1]) {
3795 : DRM_DEBUG("r600_irq_set: hpd 2\n");
3796 0 : hpd2 |= DC_HPDx_INT_EN;
3797 0 : }
3798 0 : if (rdev->irq.hpd[2]) {
3799 : DRM_DEBUG("r600_irq_set: hpd 3\n");
3800 0 : hpd3 |= DC_HPDx_INT_EN;
3801 0 : }
3802 0 : if (rdev->irq.hpd[3]) {
3803 : DRM_DEBUG("r600_irq_set: hpd 4\n");
3804 0 : hpd4 |= DC_HPDx_INT_EN;
3805 0 : }
3806 0 : if (rdev->irq.hpd[4]) {
3807 : DRM_DEBUG("r600_irq_set: hpd 5\n");
3808 0 : hpd5 |= DC_HPDx_INT_EN;
3809 0 : }
3810 0 : if (rdev->irq.hpd[5]) {
3811 : DRM_DEBUG("r600_irq_set: hpd 6\n");
3812 0 : hpd6 |= DC_HPDx_INT_EN;
3813 0 : }
3814 0 : if (rdev->irq.afmt[0]) {
3815 : DRM_DEBUG("r600_irq_set: hdmi 0\n");
3816 0 : hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3817 0 : }
3818 0 : if (rdev->irq.afmt[1]) {
3819 : DRM_DEBUG("r600_irq_set: hdmi 1\n");
3820 0 : hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3821 0 : }
3822 :
3823 0 : WREG32(CP_INT_CNTL, cp_int_cntl);
3824 0 : WREG32(DMA_CNTL, dma_cntl);
3825 0 : WREG32(DxMODE_INT_MASK, mode_int);
3826 0 : WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3827 0 : WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3828 0 : WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3829 0 : if (ASIC_IS_DCE3(rdev)) {
3830 0 : WREG32(DC_HPD1_INT_CONTROL, hpd1);
3831 0 : WREG32(DC_HPD2_INT_CONTROL, hpd2);
3832 0 : WREG32(DC_HPD3_INT_CONTROL, hpd3);
3833 0 : WREG32(DC_HPD4_INT_CONTROL, hpd4);
3834 0 : if (ASIC_IS_DCE32(rdev)) {
3835 0 : WREG32(DC_HPD5_INT_CONTROL, hpd5);
3836 0 : WREG32(DC_HPD6_INT_CONTROL, hpd6);
3837 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3838 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3839 0 : } else {
3840 0 : WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3841 0 : WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3842 : }
3843 : } else {
3844 0 : WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3845 0 : WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3846 0 : WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3847 0 : WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3848 0 : WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3849 : }
3850 0 : if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3851 0 : WREG32(CG_THERMAL_INT, thermal_int);
3852 0 : } else if (rdev->family >= CHIP_RV770) {
3853 0 : WREG32(RV770_CG_THERMAL_INT, thermal_int);
3854 0 : }
3855 :
3856 : /* posting read */
3857 0 : RREG32(R_000E50_SRBM_STATUS);
3858 :
3859 0 : return 0;
3860 0 : }
3861 :
3862 0 : static void r600_irq_ack(struct radeon_device *rdev)
3863 : {
3864 : u32 tmp;
3865 :
3866 0 : if (ASIC_IS_DCE3(rdev)) {
3867 0 : rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3868 0 : rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3869 0 : rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3870 0 : if (ASIC_IS_DCE32(rdev)) {
3871 0 : rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3872 0 : rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3873 0 : } else {
3874 0 : rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3875 0 : rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3876 : }
3877 : } else {
3878 0 : rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3879 0 : rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3880 0 : rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3881 0 : rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3882 0 : rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3883 : }
3884 0 : rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3885 0 : rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3886 :
3887 0 : if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3888 0 : WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3889 0 : if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3890 0 : WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3891 0 : if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3892 0 : WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3893 0 : if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3894 0 : WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3895 0 : if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3896 0 : WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3897 0 : if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3898 0 : WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3899 0 : if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3900 0 : if (ASIC_IS_DCE3(rdev)) {
3901 0 : tmp = RREG32(DC_HPD1_INT_CONTROL);
3902 0 : tmp |= DC_HPDx_INT_ACK;
3903 0 : WREG32(DC_HPD1_INT_CONTROL, tmp);
3904 0 : } else {
3905 0 : tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3906 0 : tmp |= DC_HPDx_INT_ACK;
3907 0 : WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3908 : }
3909 : }
3910 0 : if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3911 0 : if (ASIC_IS_DCE3(rdev)) {
3912 0 : tmp = RREG32(DC_HPD2_INT_CONTROL);
3913 0 : tmp |= DC_HPDx_INT_ACK;
3914 0 : WREG32(DC_HPD2_INT_CONTROL, tmp);
3915 0 : } else {
3916 0 : tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3917 0 : tmp |= DC_HPDx_INT_ACK;
3918 0 : WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3919 : }
3920 : }
3921 0 : if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3922 0 : if (ASIC_IS_DCE3(rdev)) {
3923 0 : tmp = RREG32(DC_HPD3_INT_CONTROL);
3924 0 : tmp |= DC_HPDx_INT_ACK;
3925 0 : WREG32(DC_HPD3_INT_CONTROL, tmp);
3926 0 : } else {
3927 0 : tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3928 0 : tmp |= DC_HPDx_INT_ACK;
3929 0 : WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3930 : }
3931 : }
3932 0 : if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3933 0 : tmp = RREG32(DC_HPD4_INT_CONTROL);
3934 0 : tmp |= DC_HPDx_INT_ACK;
3935 0 : WREG32(DC_HPD4_INT_CONTROL, tmp);
3936 0 : }
3937 0 : if (ASIC_IS_DCE32(rdev)) {
3938 0 : if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3939 0 : tmp = RREG32(DC_HPD5_INT_CONTROL);
3940 0 : tmp |= DC_HPDx_INT_ACK;
3941 0 : WREG32(DC_HPD5_INT_CONTROL, tmp);
3942 0 : }
3943 0 : if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3944 0 : tmp = RREG32(DC_HPD6_INT_CONTROL);
3945 0 : tmp |= DC_HPDx_INT_ACK;
3946 0 : WREG32(DC_HPD6_INT_CONTROL, tmp);
3947 0 : }
3948 0 : if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3949 0 : tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3950 0 : tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3951 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3952 0 : }
3953 0 : if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3954 0 : tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3955 0 : tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3956 0 : WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3957 0 : }
3958 : } else {
3959 0 : if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3960 0 : tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3961 0 : tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3962 0 : WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3963 0 : }
3964 0 : if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3965 0 : if (ASIC_IS_DCE3(rdev)) {
3966 0 : tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3967 0 : tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3968 0 : WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3969 0 : } else {
3970 0 : tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3971 0 : tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3972 0 : WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3973 : }
3974 : }
3975 : }
3976 0 : }
3977 :
3978 0 : void r600_irq_disable(struct radeon_device *rdev)
3979 : {
3980 0 : r600_disable_interrupts(rdev);
3981 : /* Wait and acknowledge irq */
3982 0 : mdelay(1);
3983 0 : r600_irq_ack(rdev);
3984 0 : r600_disable_interrupt_state(rdev);
3985 0 : }
3986 :
3987 0 : static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3988 : {
3989 : u32 wptr, tmp;
3990 :
3991 0 : if (rdev->wb.enabled)
3992 0 : wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3993 : else
3994 0 : wptr = RREG32(IH_RB_WPTR);
3995 :
3996 0 : if (wptr & RB_OVERFLOW) {
3997 0 : wptr &= ~RB_OVERFLOW;
3998 : /* When a ring buffer overflow happens, start parsing interrupts
3999 : * from the last vector not yet overwritten (wptr + 16); see the
4000 : * worked example after this function. Hopefully this lets us catch up.
4001 : */
4002 0 : dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
4003 : wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
4004 0 : rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
4005 0 : tmp = RREG32(IH_RB_CNTL);
4006 0 : tmp |= IH_WPTR_OVERFLOW_CLEAR;
4007 0 : WREG32(IH_RB_CNTL, tmp);
4008 0 : }
4009 0 : return (wptr & rdev->ih.ptr_mask);
4010 : }
4011 :
4012 : /* r600 IV Ring
4013 : * Each IV ring entry is 128 bits:
4014 : * [7:0] - interrupt source id
4015 : * [31:8] - reserved
4016 : * [59:32] - interrupt source data
4017 : * [127:60] - reserved
4018 : *
4019 : * The basic interrupt vector entries
4020 : * are decoded as follows:
4021 : * src_id src_data description
4022 : * 1 0 D1 Vblank
4023 : * 1 1 D1 Vline
4024 : * 5 0 D2 Vblank
4025 : * 5 1 D2 Vline
4026 : * 19 0 FP Hot plug detection A
4027 : * 19 1 FP Hot plug detection B
4028 : * 19 2 DAC A auto-detection
4029 : * 19 3 DAC B auto-detection
4030 : * 21 4 HDMI block A
4031 : * 21 5 HDMI block B
4032 : * 176 - CP_INT RB
4033 : * 177 - CP_INT IB1
4034 : * 178 - CP_INT IB2
4035 : * 181 - EOP Interrupt
4036 : * 233 - GUI Idle
4037 : *
4038 : * Note: these are based on r600 and may need to be
4039 : * adjusted or extended on newer asics
4040 : */
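     :
     : /* Editorial sketch (not driver code): how one 128-bit vector decodes
     :  * into the fields tabulated above; ring[] holds little-endian dwords
     :  * and ring_index counts dwords, mirroring r600_irq_process() below.
     :  */
     : #if 0
     : 	src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;          /* [7:0]   */
     : 	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; /* [59:32] */
     : 	/* dwords 2 and 3 of each vector are reserved on r600 */
     : #endif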
4041 :
4042 0 : int r600_irq_process(struct radeon_device *rdev)
4043 : {
4044 : u32 wptr;
4045 : u32 rptr;
4046 : u32 src_id, src_data;
4047 : u32 ring_index;
4048 : bool queue_hotplug = false;
4049 : bool queue_hdmi = false;
4050 : bool queue_thermal = false;
4051 :
4052 0 : if (!rdev->ih.enabled || rdev->shutdown)
4053 0 : return IRQ_NONE;
4054 :
4055 : /* No MSIs, need a dummy read to flush PCI DMAs */
4056 0 : if (!rdev->msi_enabled)
4057 0 : RREG32(IH_RB_WPTR);
4058 :
4059 0 : wptr = r600_get_ih_wptr(rdev);
4060 :
4061 0 : if (wptr == rdev->ih.rptr)
4062 0 : return IRQ_NONE;
4063 : restart_ih:
4064 : /* is somebody else already processing irqs? */
4065 0 : if (atomic_xchg(&rdev->ih.lock, 1))
4066 0 : return IRQ_NONE;
4067 :
4068 0 : rptr = rdev->ih.rptr;
4069 : DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
4070 :
4071 : /* Order reading of wptr vs. reading of IH ring data */
4072 0 : rmb();
4073 :
4074 : /* display interrupts */
4075 0 : r600_irq_ack(rdev);
4076 :
4077 0 : while (rptr != wptr) {
4078 : /* wptr/rptr are in bytes! */
4079 0 : ring_index = rptr / 4;
4080 0 : src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
4081 0 : src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
4082 :
4083 0 : switch (src_id) {
4084 : case 1: /* D1 vblank/vline */
4085 0 : switch (src_data) {
4086 : case 0: /* D1 vblank */
4087 0 : if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
4088 : DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4089 :
4090 0 : if (rdev->irq.crtc_vblank_int[0]) {
4091 0 : drm_handle_vblank(rdev->ddev, 0);
4092 0 : rdev->pm.vblank_sync = true;
4093 0 : wake_up(&rdev->irq.vblank_queue);
4094 0 : }
4095 0 : if (atomic_read(&rdev->irq.pflip[0]))
4096 0 : radeon_crtc_handle_vblank(rdev, 0);
4097 0 : rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4098 : DRM_DEBUG("IH: D1 vblank\n");
4099 :
4100 0 : break;
4101 : case 1: /* D1 vline */
4102 0 : if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
4103 : DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4104 :
4105 0 : rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4106 : DRM_DEBUG("IH: D1 vline\n");
4107 :
4108 0 : break;
4109 : default:
4110 : DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4111 : break;
4112 : }
4113 : break;
4114 : case 5: /* D2 vblank/vline */
4115 0 : switch (src_data) {
4116 : case 0: /* D2 vblank */
4117 0 : if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
4118 : DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4119 :
4120 0 : if (rdev->irq.crtc_vblank_int[1]) {
4121 0 : drm_handle_vblank(rdev->ddev, 1);
4122 0 : rdev->pm.vblank_sync = true;
4123 0 : wake_up(&rdev->irq.vblank_queue);
4124 0 : }
4125 0 : if (atomic_read(&rdev->irq.pflip[1]))
4126 0 : radeon_crtc_handle_vblank(rdev, 1);
4127 0 : rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4128 : DRM_DEBUG("IH: D2 vblank\n");
4129 :
4130 0 : break;
4131 : case 1: /* D2 vline */
4132 0 : if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
4133 : DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4134 :
4135 0 : rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4136 : DRM_DEBUG("IH: D2 vline\n");
4137 :
4138 0 : break;
4139 : default:
4140 : DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4141 : break;
4142 : }
4143 : break;
4144 : case 9: /* D1 pflip */
4145 : DRM_DEBUG("IH: D1 flip\n");
4146 0 : if (radeon_use_pflipirq > 0)
4147 0 : radeon_crtc_handle_flip(rdev, 0);
4148 : break;
4149 : case 11: /* D2 pflip */
4150 : DRM_DEBUG("IH: D2 flip\n");
4151 0 : if (radeon_use_pflipirq > 0)
4152 0 : radeon_crtc_handle_flip(rdev, 1);
4153 : break;
4154 : case 19: /* HPD/DAC hotplug */
4155 0 : switch (src_data) {
4156 : case 0:
4157 0 : if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
4158 : DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
4159 :
4160 0 : rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4161 : queue_hotplug = true;
4162 : DRM_DEBUG("IH: HPD1\n");
4163 0 : break;
4164 : case 1:
4165 0 : if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
4166 : DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
4167 :
4168 0 : rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4169 : queue_hotplug = true;
4170 : DRM_DEBUG("IH: HPD2\n");
4171 0 : break;
4172 : case 4:
4173 0 : if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
4174 : DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
4175 :
4176 0 : rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4177 : queue_hotplug = true;
4178 : DRM_DEBUG("IH: HPD3\n");
4179 0 : break;
4180 : case 5:
4181 0 : if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
4182 : DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
4183 :
4184 0 : rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4185 : queue_hotplug = true;
4186 : DRM_DEBUG("IH: HPD4\n");
4187 0 : break;
4188 : case 10:
4189 0 : if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
4190 : DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
4191 :
4192 0 : rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4193 : queue_hotplug = true;
4194 : DRM_DEBUG("IH: HPD5\n");
4195 0 : break;
4196 : case 12:
4197 0 : if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
4198 : DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
4199 :
4200 0 : rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4201 : queue_hotplug = true;
4202 : DRM_DEBUG("IH: HPD6\n");
4203 :
4204 0 : break;
4205 : default:
4206 : DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4207 : break;
4208 : }
4209 : break;
4210 : case 21: /* hdmi */
4211 0 : switch (src_data) {
4212 : case 4:
4213 0 : if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
4214 : DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
4215 :
4216 0 : rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4217 : queue_hdmi = true;
4218 : DRM_DEBUG("IH: HDMI0\n");
4219 :
4220 0 : break;
4221 : case 5:
4222 0 : if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
4223 : DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
4224 :
4225 0 : rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4226 : queue_hdmi = true;
4227 : DRM_DEBUG("IH: HDMI1\n");
4228 :
4229 0 : break;
4230 : default:
4231 0 : DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4232 0 : break;
4233 : }
4234 : break;
4235 : case 124: /* UVD */
4236 : DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
4237 0 : radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
4238 0 : break;
4239 : case 176: /* CP_INT in ring buffer */
4240 : case 177: /* CP_INT in IB1 */
4241 : case 178: /* CP_INT in IB2 */
4242 : DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
4243 0 : radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4244 0 : break;
4245 : case 181: /* CP EOP event */
4246 : DRM_DEBUG("IH: CP EOP\n");
4247 0 : radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4248 0 : break;
4249 : case 224: /* DMA trap event */
4250 : DRM_DEBUG("IH: DMA trap\n");
4251 0 : radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4252 0 : break;
4253 : case 230: /* thermal low to high */
4254 : DRM_DEBUG("IH: thermal low to high\n");
4255 0 : rdev->pm.dpm.thermal.high_to_low = false;
4256 : queue_thermal = true;
4257 0 : break;
4258 : case 231: /* thermal high to low */
4259 : DRM_DEBUG("IH: thermal high to low\n");
4260 0 : rdev->pm.dpm.thermal.high_to_low = true;
4261 : queue_thermal = true;
4262 0 : break;
4263 : case 233: /* GUI IDLE */
4264 : DRM_DEBUG("IH: GUI idle\n");
4265 : break;
4266 : default:
4267 : DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4268 : break;
4269 : }
4270 :
4271 : /* wptr/rptr are in bytes! */
4272 0 : rptr += 16;
4273 0 : rptr &= rdev->ih.ptr_mask;
4274 0 : WREG32(IH_RB_RPTR, rptr);
4275 : }
4276 0 : if (queue_hotplug)
4277 0 : schedule_delayed_work(&rdev->hotplug_work, 0);
4278 0 : if (queue_hdmi)
4279 0 : schedule_work(&rdev->audio_work);
4280 0 : if (queue_thermal && rdev->pm.dpm_enabled)
4281 0 : schedule_work(&rdev->pm.dpm.thermal.work);
4282 0 : rdev->ih.rptr = rptr;
4283 0 : atomic_set(&rdev->ih.lock, 0);
4284 :
4285 : /* make sure wptr hasn't changed while processing */
4286 0 : wptr = r600_get_ih_wptr(rdev);
4287 0 : if (wptr != rptr)
4288 0 : goto restart_ih;
4289 :
4290 0 : return IRQ_HANDLED;
4291 0 : }
4292 :
4293 : /*
4294 : * Debugfs info
4295 : */
4296 : #if defined(CONFIG_DEBUG_FS)
4297 :
4298 : static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4299 : {
4300 : struct drm_info_node *node = (struct drm_info_node *) m->private;
4301 : struct drm_device *dev = node->minor->dev;
4302 : struct radeon_device *rdev = dev->dev_private;
4303 :
4304 : DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4305 : DREG32_SYS(m, rdev, VM_L2_STATUS);
4306 : return 0;
4307 : }
4308 :
4309 : static struct drm_info_list r600_mc_info_list[] = {
4310 : {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4311 : };
4312 : #endif
4313 :
4314 0 : int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4315 : {
4316 : #if defined(CONFIG_DEBUG_FS)
4317 : return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4318 : #else
4319 0 : return 0;
4320 : #endif
4321 : }
4322 :
4323 : /**
4324 : * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
4325 : * @rdev: radeon device structure
4326 : *
4327 : * Some R6XX/R7XX don't seem to take into account HDP flushes performed
4328 : * through the ring buffer. This leads to corruption in rendering, see
4329 : * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
4330 : * directly perform the HDP flush by writing the register through MMIO.
4331 : */
4332 0 : void r600_mmio_hdp_flush(struct radeon_device *rdev)
4333 : {
4334 : /* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
4335 : * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
4336 : * This seems to cause problems on some AGP cards, so just use
4337 : * the old method for them.
4338 : */
4339 0 : if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
4340 0 : rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
4341 0 : void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
4342 : u32 tmp;
4343 :
4344 0 : WREG32(HDP_DEBUG1, 0);
4345 0 : tmp = readl((void __iomem *)ptr);
4346 0 : } else
4347 0 : WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4348 0 : }
4349 :
4350 0 : void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4351 : {
4352 : u32 link_width_cntl, mask;
4353 :
4354 0 : if (rdev->flags & RADEON_IS_IGP)
4355 0 : return;
4356 :
4357 0 : if (!(rdev->flags & RADEON_IS_PCIE))
4358 0 : return;
4359 :
4360 : /* x2 cards have a special sequence */
4361 0 : if (ASIC_IS_X2(rdev))
4362 0 : return;
4363 :
4364 0 : radeon_gui_idle(rdev);
4365 :
4366 0 : switch (lanes) {
4367 : case 0:
4368 : mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
4369 0 : break;
4370 : case 1:
4371 : mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
4372 0 : break;
4373 : case 2:
4374 : mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
4375 0 : break;
4376 : case 4:
4377 : mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
4378 0 : break;
4379 : case 8:
4380 : mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4381 0 : break;
4382 : case 12:
4383 : /* not actually supported */
4384 : mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4385 0 : break;
4386 : case 16:
4387 : mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4388 0 : break;
4389 : default:
4390 0 : DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4391 0 : return;
4392 : }
4393 :
4394 0 : link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4395 0 : link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4396 0 : link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4397 0 : link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4398 : R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4399 :
4400 0 : WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4401 0 : }
4402 :
4403 0 : int r600_get_pcie_lanes(struct radeon_device *rdev)
4404 : {
4405 : u32 link_width_cntl;
4406 :
4407 0 : if (rdev->flags & RADEON_IS_IGP)
4408 0 : return 0;
4409 :
4410 0 : if (!(rdev->flags & RADEON_IS_PCIE))
4411 0 : return 0;
4412 :
4413 : /* x2 cards have a special sequence */
4414 0 : if (ASIC_IS_X2(rdev))
4415 0 : return 0;
4416 :
4417 0 : radeon_gui_idle(rdev);
4418 :
4419 0 : link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4420 :
4421 0 : switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4422 : case RADEON_PCIE_LC_LINK_WIDTH_X1:
4423 0 : return 1;
4424 : case RADEON_PCIE_LC_LINK_WIDTH_X2:
4425 0 : return 2;
4426 : case RADEON_PCIE_LC_LINK_WIDTH_X4:
4427 0 : return 4;
4428 : case RADEON_PCIE_LC_LINK_WIDTH_X8:
4429 0 : return 8;
4430 : case RADEON_PCIE_LC_LINK_WIDTH_X12:
4431 : /* not actually supported */
4432 0 : return 12;
4433 : case RADEON_PCIE_LC_LINK_WIDTH_X0:
4434 : case RADEON_PCIE_LC_LINK_WIDTH_X16:
4435 : default:
4436 0 : return 16;
4437 : }
4438 0 : }
4439 :
4440 0 : static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4441 : {
4442 : u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4443 : u16 link_cntl2;
4444 0 : u32 mask;
4445 :
4446 0 : if (radeon_pcie_gen2 == 0)
4447 0 : return;
4448 :
4449 0 : if (rdev->flags & RADEON_IS_IGP)
4450 0 : return;
4451 :
4452 0 : if (!(rdev->flags & RADEON_IS_PCIE))
4453 0 : return;
4454 :
4455 : /* x2 cards have a special sequence */
4456 0 : if (ASIC_IS_X2(rdev))
4457 0 : return;
4458 :
4459 : /* only RV6xx+ chips are supported */
4460 0 : if (rdev->family <= CHIP_R600)
4461 0 : return;
4462 :
4463 0 : if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask))
4464 0 : return;
4465 :
4466 0 : if (!(mask & (DRM_PCIE_SPEED_50|DRM_PCIE_SPEED_80)))
4467 0 : return;
4468 :
4469 0 : speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4470 0 : if (speed_cntl & LC_CURRENT_DATA_RATE) {
4471 : DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4472 0 : return;
4473 : }
4474 :
4475 : DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
4476 :
4477 : /* 55 nm r6xx asics */
4478 0 : if ((rdev->family == CHIP_RV670) ||
4479 0 : (rdev->family == CHIP_RV620) ||
4480 0 : (rdev->family == CHIP_RV635)) {
4481 : /* advertise upconfig capability */
4482 0 : link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4483 0 : link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4484 0 : WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4485 0 : link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4486 0 : if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4487 0 : lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4488 0 : link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4489 : LC_RECONFIG_ARC_MISSING_ESCAPE);
4490 0 : link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4491 0 : WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4492 0 : } else {
4493 0 : link_width_cntl |= LC_UPCONFIGURE_DIS;
4494 0 : WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4495 : }
4496 : }
4497 :
4498 0 : speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4499 0 : if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4500 0 : (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4501 :
4502 : /* 55 nm r6xx asics */
4503 0 : if ((rdev->family == CHIP_RV670) ||
4504 0 : (rdev->family == CHIP_RV620) ||
4505 0 : (rdev->family == CHIP_RV635)) {
4506 0 : WREG32(MM_CFGREGS_CNTL, 0x8);
4507 0 : link_cntl2 = RREG32(0x4088);
4508 0 : WREG32(MM_CFGREGS_CNTL, 0);
4509 : /* not supported yet */
4510 0 : if (link_cntl2 & SELECTABLE_DEEMPHASIS)
4511 0 : return;
4512 : }
4513 :
4514 0 : speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
4515 0 : speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
4516 0 : speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4517 0 : speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4518 0 : speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4519 0 : WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4520 :
4521 0 : tmp = RREG32(0x541c);
4522 0 : WREG32(0x541c, tmp | 0x8);
4523 0 : WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
4524 0 : link_cntl2 = RREG16(0x4088);
4525 0 : link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
4526 0 : link_cntl2 |= 0x2;
4527 0 : WREG16(0x4088, link_cntl2);
4528 0 : WREG32(MM_CFGREGS_CNTL, 0);
4529 :
4530 0 : if ((rdev->family == CHIP_RV670) ||
4531 0 : (rdev->family == CHIP_RV620) ||
4532 0 : (rdev->family == CHIP_RV635)) {
4533 0 : training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4534 0 : training_cntl &= ~LC_POINT_7_PLUS_EN;
4535 0 : WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4536 0 : } else {
4537 0 : speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4538 0 : speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4539 0 : WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4540 : }
4541 :
4542 0 : speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4543 0 : speed_cntl |= LC_GEN2_EN_STRAP;
4544 0 : WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4545 :
4546 0 : } else {
4547 0 : link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4548 : /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4549 : if (1)
4550 0 : link_width_cntl |= LC_UPCONFIGURE_DIS;
4551 : else
4552 : link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4553 0 : WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4554 : }
4555 0 : }
4556 :
4557 : /**
4558 : * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4559 : *
4560 : * @rdev: radeon_device pointer
4561 : *
4562 : * Fetches a GPU clock counter snapshot (R6xx-cayman).
4563 : * Returns the 64 bit clock counter snapshot.
4564 : */
4565 0 : uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4566 : {
4567 : uint64_t clock;
4568 :
4569 0 : mutex_lock(&rdev->gpu_clock_mutex);
4570 0 : WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4571 0 : clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
4572 0 : ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4573 0 : mutex_unlock(&rdev->gpu_clock_mutex);
4574 0 : return clock;
4575 : }
|