Line data Source code
1 : /*
2 : * Permission is hereby granted, free of charge, to any person obtaining a
3 : * copy of this software and associated documentation files (the "Software"),
4 : * to deal in the Software without restriction, including without limitation
5 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 : * and/or sell copies of the Software, and to permit persons to whom the
7 : * Software is furnished to do so, subject to the following conditions:
8 : *
9 : * The above copyright notice and this permission notice shall be included in
10 : * all copies or substantial portions of the Software.
11 : *
12 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 : * OTHER DEALINGS IN THE SOFTWARE.
19 : *
20 : * Authors: Rafał Miłecki <zajec5@gmail.com>
21 : * Alex Deucher <alexdeucher@gmail.com>
22 : */
23 : #include <dev/pci/drm/drmP.h>
24 : #include "radeon.h"
25 : #include "avivod.h"
26 : #include "atom.h"
27 : #include "r600_dpm.h"
28 :
29 : #define RADEON_IDLE_LOOP_MS 100
30 : #define RADEON_RECLOCK_DELAY_MS 200
31 : #define RADEON_WAIT_VBLANK_TIMEOUT 200
32 :
33 : #ifdef DRMDEBUG
34 : static const char *radeon_pm_state_type_name[5] = {
35 : "",
36 : "Powersave",
37 : "Battery",
38 : "Balanced",
39 : "Performance",
40 : };
41 : #endif
42 :
43 : static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44 : static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45 : static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46 : static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47 : static void radeon_pm_update_profile(struct radeon_device *rdev);
48 : static void radeon_pm_set_clocks(struct radeon_device *rdev);
49 :
50 0 : int radeon_pm_get_type_index(struct radeon_device *rdev,
51 : enum radeon_pm_state_type ps_type,
52 : int instance)
53 : {
54 : int i;
55 : int found_instance = -1;
56 :
57 0 : for (i = 0; i < rdev->pm.num_power_states; i++) {
58 0 : if (rdev->pm.power_state[i].type == ps_type) {
59 0 : found_instance++;
60 0 : if (found_instance == instance)
61 0 : return i;
62 : }
63 : }
64 : /* return default if no match */
65 0 : return rdev->pm.default_power_state_index;
66 0 : }
67 :
/*
 * radeon_pm_acpi_event_handler - react to an ACPI AC<->DC power event.
 *
 * DPM path: refresh dpm.ac_power under pm.mutex and, on ARUBA parts
 * with a BAPM hook, re-arm BAPM for the new power source.
 * PROFILE path: when the "auto" profile is selected, recompute the
 * profile and reprogram the clocks for the new power source.
 */
68 0 : void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69 : {
70 0 : if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71 0 : mutex_lock(&rdev->pm.mutex);
72 0 : if (power_supply_is_system_supplied() > 0)
73 0 : rdev->pm.dpm.ac_power = true;
74 : else
75 0 : rdev->pm.dpm.ac_power = false;
76 0 : if (rdev->family == CHIP_ARUBA) {
77 0 : if (rdev->asic->dpm.enable_bapm)
78 0 : radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
79 : }
80 0 : mutex_unlock(&rdev->pm.mutex);
81 0 : } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
82 0 : if (rdev->pm.profile == PM_PROFILE_AUTO) {
83 0 : mutex_lock(&rdev->pm.mutex);
84 0 : radeon_pm_update_profile(rdev);
85 0 : radeon_pm_set_clocks(rdev);
86 0 : mutex_unlock(&rdev->pm.mutex);
87 0 : }
88 : }
89 0 : }
90 :
/*
 * radeon_pm_update_profile - resolve the user-selected profile into a
 * profile table index and pick the requested power state / clock mode.
 *
 * AUTO selects the HIGH profile on AC power and MID on battery; the
 * *_MH (multi-head) variants are used when more than one CRTC is
 * active, *_SH (single-head) otherwise.  With no active CRTCs the
 * profile's dpms_off state/clock-mode is requested, otherwise its
 * dpms_on pair.  Callers hold pm.mutex (see the call sites in this
 * file).
 */
91 0 : static void radeon_pm_update_profile(struct radeon_device *rdev)
92 : {
93 0 : switch (rdev->pm.profile) {
94 : case PM_PROFILE_DEFAULT:
95 0 : rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
96 0 : break;
97 : case PM_PROFILE_AUTO:
98 0 : if (power_supply_is_system_supplied() > 0) {
99 0 : if (rdev->pm.active_crtc_count > 1)
100 0 : rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
101 : else
102 0 : rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
103 : } else {
104 0 : if (rdev->pm.active_crtc_count > 1)
105 0 : rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
106 : else
107 0 : rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
108 : }
109 : break;
110 : case PM_PROFILE_LOW:
111 0 : if (rdev->pm.active_crtc_count > 1)
112 0 : rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
113 : else
114 0 : rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
115 : break;
116 : case PM_PROFILE_MID:
117 0 : if (rdev->pm.active_crtc_count > 1)
118 0 : rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
119 : else
120 0 : rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
121 : break;
122 : case PM_PROFILE_HIGH:
123 0 : if (rdev->pm.active_crtc_count > 1)
124 0 : rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
125 : else
126 0 : rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
127 : break;
128 : }
129 :
/* no displays active: use the profile's DPMS-off state, else DPMS-on */
130 0 : if (rdev->pm.active_crtc_count == 0) {
131 0 : rdev->pm.requested_power_state_index =
132 0 : rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
133 0 : rdev->pm.requested_clock_mode_index =
134 0 : rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
135 0 : } else {
136 0 : rdev->pm.requested_power_state_index =
137 0 : rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
138 0 : rdev->pm.requested_clock_mode_index =
139 0 : rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
140 : }
141 0 : }
142 :
143 0 : static void radeon_unmap_vram_bos(struct radeon_device *rdev)
144 : {
145 : struct radeon_bo *bo, *n;
146 :
147 0 : if (list_empty(&rdev->gem.objects))
148 0 : return;
149 :
150 0 : list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
151 0 : if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
152 0 : ttm_bo_unmap_virtual(&bo->tbo);
153 : }
154 0 : }
155 :
156 0 : static void radeon_sync_with_vblank(struct radeon_device *rdev)
157 : {
158 0 : if (rdev->pm.active_crtcs) {
159 0 : rdev->pm.vblank_sync = false;
160 0 : wait_event_timeout(
161 : rdev->irq.vblank_queue, rdev->pm.vblank_sync,
162 : msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
163 0 : }
164 0 : }
165 :
/*
 * radeon_set_power_state - program the requested power state into hw.
 *
 * Returns early when the requested state/clock-mode is already current,
 * or (with a debug message) when the GUI engine is not idle.  Both sclk
 * and mclk are clamped to the default clocks.  Voltage and pcie-lane
 * ("misc") programming is ordered for safety: before raising clocks,
 * after lowering them (misc_after).  In DYNPM mode the switch is
 * abandoned unless we are inside the vblank interval.  On success the
 * current_* bookkeeping is updated to the requested state.
 */
166 0 : static void radeon_set_power_state(struct radeon_device *rdev)
167 : {
168 : u32 sclk, mclk;
169 : bool misc_after = false;
170 :
171 0 : if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
172 0 : (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
173 0 : return;
174 :
175 0 : if (radeon_gui_idle(rdev)) {
176 0 : sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
177 0 : clock_info[rdev->pm.requested_clock_mode_index].sclk;
178 0 : if (sclk > rdev->pm.default_sclk)
179 0 : sclk = rdev->pm.default_sclk;
180 :
181 : /* starting with BTC, there is one state that is used for both
182 : * MH and SH. Difference is that we always use the high clock index for
183 : * mclk and vddci.
184 : */
185 0 : if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
186 0 : (rdev->family >= CHIP_BARTS) &&
187 0 : rdev->pm.active_crtc_count &&
188 0 : ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
189 0 : (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
190 0 : mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
191 0 : clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
192 : else
193 0 : mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
194 0 : clock_info[rdev->pm.requested_clock_mode_index].mclk;
195 :
196 0 : if (mclk > rdev->pm.default_mclk)
197 0 : mclk = rdev->pm.default_mclk;
198 :
199 : /* upvolt before raising clocks, downvolt after lowering clocks */
200 0 : if (sclk < rdev->pm.current_sclk)
201 0 : misc_after = true;
202 :
203 0 : radeon_sync_with_vblank(rdev);
204 :
/* dynpm reclocks must happen inside vblank; bail out if we missed it */
205 0 : if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
206 0 : if (!radeon_pm_in_vbl(rdev))
207 0 : return;
208 : }
209 :
210 0 : radeon_pm_prepare(rdev);
211 :
212 0 : if (!misc_after)
213 : /* voltage, pcie lanes, etc.*/
214 0 : radeon_pm_misc(rdev);
215 :
216 : /* set engine clock */
217 0 : if (sclk != rdev->pm.current_sclk) {
218 0 : radeon_pm_debug_check_in_vbl(rdev, false);
219 0 : radeon_set_engine_clock(rdev, sclk);
220 0 : radeon_pm_debug_check_in_vbl(rdev, true);
221 0 : rdev->pm.current_sclk = sclk;
222 : DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
223 0 : }
224 :
225 : /* set memory clock */
226 0 : if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
227 0 : radeon_pm_debug_check_in_vbl(rdev, false);
228 0 : radeon_set_memory_clock(rdev, mclk);
229 0 : radeon_pm_debug_check_in_vbl(rdev, true);
230 0 : rdev->pm.current_mclk = mclk;
231 : DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
232 0 : }
233 :
234 0 : if (misc_after)
235 : /* voltage, pcie lanes, etc.*/
236 0 : radeon_pm_misc(rdev);
237 :
238 0 : radeon_pm_finish(rdev);
239 :
240 0 : rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
241 0 : rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
242 0 : } else
243 : DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
244 0 : }
245 :
/*
 * radeon_pm_set_clocks - safely switch to the requested power state.
 *
 * No-op when the requested state/clock mode is already current.  Takes
 * pm.mclk_lock (write) and ring_lock, drains every active ring — a
 * fence-wait failure aborts the reclock (the GPU needs a reset, which
 * is handled elsewhere) — unmaps VRAM BOs, holds vblank references on
 * the active CRTCs across the actual switch, then refreshes the
 * display watermarks/bandwidth for the new state.
 */
246 0 : static void radeon_pm_set_clocks(struct radeon_device *rdev)
247 : {
248 : int i, r;
249 :
250 : /* no need to take locks, etc. if nothing's going to change */
251 0 : if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
252 0 : (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
253 0 : return;
254 :
255 0 : down_write(&rdev->pm.mclk_lock);
256 0 : mutex_lock(&rdev->ring_lock);
257 :
258 : /* wait for the rings to drain */
259 0 : for (i = 0; i < RADEON_NUM_RINGS; i++) {
260 0 : struct radeon_ring *ring = &rdev->ring[i];
261 0 : if (!ring->ready) {
262 0 : continue;
263 : }
264 0 : r = radeon_fence_wait_empty(rdev, i);
265 0 : if (r) {
266 : /* needs a GPU reset dont reset here */
267 0 : mutex_unlock(&rdev->ring_lock);
268 0 : up_write(&rdev->pm.mclk_lock);
269 0 : return;
270 : }
271 0 : }
272 :
273 0 : radeon_unmap_vram_bos(rdev);
274 :
/* pin vblank interrupts on the active CRTCs for the duration of the switch */
275 0 : if (rdev->irq.installed) {
276 0 : for (i = 0; i < rdev->num_crtc; i++) {
277 0 : if (rdev->pm.active_crtcs & (1 << i)) {
278 0 : rdev->pm.req_vblank |= (1 << i);
279 0 : drm_vblank_get(rdev->ddev, i);
280 0 : }
281 : }
282 : }
283 :
284 0 : radeon_set_power_state(rdev);
285 :
286 0 : if (rdev->irq.installed) {
287 0 : for (i = 0; i < rdev->num_crtc; i++) {
288 0 : if (rdev->pm.req_vblank & (1 << i)) {
289 0 : rdev->pm.req_vblank &= ~(1 << i);
290 0 : drm_vblank_put(rdev->ddev, i);
291 0 : }
292 : }
293 : }
294 :
295 : /* update display watermarks based on new power state */
296 0 : radeon_update_bandwidth_info(rdev);
297 0 : if (rdev->pm.active_crtc_count)
298 0 : radeon_bandwidth_update(rdev);
299 :
300 0 : rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
301 :
302 0 : mutex_unlock(&rdev->ring_lock);
303 0 : up_write(&rdev->pm.mclk_lock);
304 0 : }
305 :
/*
 * radeon_pm_print_states - dump the parsed power state table via
 * DRM_DEBUG_DRIVER (debug aid only; no hardware access).
 *
 * Clocks are printed multiplied by 10 — presumably stored in 10 kHz
 * units so the output is in kHz; confirm against the table parser.
 * IGP parts have no discrete memory clock/voltage, hence the shorter
 * per-mode line.
 */
306 0 : static void radeon_pm_print_states(struct radeon_device *rdev)
307 : {
308 : int i, j;
309 : struct radeon_power_state *power_state;
310 : struct radeon_pm_clock_info *clock_info;
311 :
312 : DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
313 0 : for (i = 0; i < rdev->pm.num_power_states; i++) {
314 0 : power_state = &rdev->pm.power_state[i];
315 : DRM_DEBUG_DRIVER("State %d: %s\n", i,
316 : radeon_pm_state_type_name[power_state->type]);
317 0 : if (i == rdev->pm.default_power_state_index)
318 : DRM_DEBUG_DRIVER("\tDefault");
319 0 : if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
320 : DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
321 0 : if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
322 : DRM_DEBUG_DRIVER("\tSingle display only\n");
323 : DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
324 0 : for (j = 0; j < power_state->num_clock_modes; j++) {
325 0 : clock_info = &(power_state->clock_info[j]);
326 0 : if (rdev->flags & RADEON_IS_IGP)
327 : DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
328 : j,
329 : clock_info->sclk * 10);
330 : else
331 : DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
332 : j,
333 : clock_info->sclk * 10,
334 : clock_info->mclk * 10,
335 : clock_info->voltage.voltage);
336 : }
337 : }
338 0 : }
339 :
/*
 * sysfs power-management interface (power_profile / power_method /
 * power_dpm_state / power_dpm_force_performance_level attributes).
 * Compiled out on this platform ("notyet") — kept in sync with the
 * Linux driver for a future port.
 */
340 : #ifdef notyet
/* sysfs show: current power profile as a keyword string. */
341 : static ssize_t radeon_get_pm_profile(struct device *dev,
342 : struct device_attribute *attr,
343 : char *buf)
344 : {
345 : struct drm_device *ddev = dev_get_drvdata(dev);
346 : struct radeon_device *rdev = ddev->dev_private;
347 : int cp = rdev->pm.profile;
348 :
349 : return snprintf(buf, PAGE_SIZE, "%s\n",
350 : (cp == PM_PROFILE_AUTO) ? "auto" :
351 : (cp == PM_PROFILE_LOW) ? "low" :
352 : (cp == PM_PROFILE_MID) ? "mid" :
353 : (cp == PM_PROFILE_HIGH) ? "high" : "default");
354 : }
355 :
/*
 * sysfs store: select a power profile by keyword; only valid while the
 * PROFILE method is active, otherwise -EINVAL.  On success the profile
 * is resolved and the clocks reprogrammed under pm.mutex.
 */
356 : static ssize_t radeon_set_pm_profile(struct device *dev,
357 : struct device_attribute *attr,
358 : const char *buf,
359 : size_t count)
360 : {
361 : struct drm_device *ddev = dev_get_drvdata(dev);
362 : struct radeon_device *rdev = ddev->dev_private;
363 :
364 : /* Can't set profile when the card is off */
365 : #ifdef notyet
366 : if ((rdev->flags & RADEON_IS_PX) &&
367 : (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
368 : return -EINVAL;
369 : #endif
370 :
371 : mutex_lock(&rdev->pm.mutex);
372 : if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
373 : if (strncmp("default", buf, strlen("default")) == 0)
374 : rdev->pm.profile = PM_PROFILE_DEFAULT;
375 : else if (strncmp("auto", buf, strlen("auto")) == 0)
376 : rdev->pm.profile = PM_PROFILE_AUTO;
377 : else if (strncmp("low", buf, strlen("low")) == 0)
378 : rdev->pm.profile = PM_PROFILE_LOW;
379 : else if (strncmp("mid", buf, strlen("mid")) == 0)
380 : rdev->pm.profile = PM_PROFILE_MID;
381 : else if (strncmp("high", buf, strlen("high")) == 0)
382 : rdev->pm.profile = PM_PROFILE_HIGH;
383 : else {
384 : count = -EINVAL;
385 : goto fail;
386 : }
387 : radeon_pm_update_profile(rdev);
388 : radeon_pm_set_clocks(rdev);
389 : } else
390 : count = -EINVAL;
391 :
392 : fail:
393 : mutex_unlock(&rdev->pm.mutex);
394 :
395 : return count;
396 : }
397 :
/* sysfs show: active PM method ("dynpm", "profile" or "dpm"). */
398 : static ssize_t radeon_get_pm_method(struct device *dev,
399 : struct device_attribute *attr,
400 : char *buf)
401 : {
402 : struct drm_device *ddev = dev_get_drvdata(dev);
403 : struct radeon_device *rdev = ddev->dev_private;
404 : int pm = rdev->pm.pm_method;
405 :
406 : return snprintf(buf, PAGE_SIZE, "%s\n",
407 : (pm == PM_METHOD_DYNPM) ? "dynpm" :
408 : (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
409 : }
410 :
/*
 * sysfs store: switch between the legacy "dynpm" and "profile"
 * methods.  Rejected with -EINVAL while DPM is active (the legacy
 * modes are not supported alongside dpm).  Switching to profile also
 * cancels the pending dynpm idle work.
 */
411 : static ssize_t radeon_set_pm_method(struct device *dev,
412 : struct device_attribute *attr,
413 : const char *buf,
414 : size_t count)
415 : {
416 : struct drm_device *ddev = dev_get_drvdata(dev);
417 : struct radeon_device *rdev = ddev->dev_private;
418 :
419 : #ifdef notyet
420 : /* Can't set method when the card is off */
421 : if ((rdev->flags & RADEON_IS_PX) &&
422 : (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
423 : count = -EINVAL;
424 : goto fail;
425 : }
426 : #endif
427 :
428 : /* we don't support the legacy modes with dpm */
429 : if (rdev->pm.pm_method == PM_METHOD_DPM) {
430 : count = -EINVAL;
431 : goto fail;
432 : }
433 :
434 : if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
435 : mutex_lock(&rdev->pm.mutex);
436 : rdev->pm.pm_method = PM_METHOD_DYNPM;
437 : rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
438 : rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
439 : mutex_unlock(&rdev->pm.mutex);
440 : } else if (strncmp("profile", buf, strlen("profile")) == 0) {
441 : mutex_lock(&rdev->pm.mutex);
442 : /* disable dynpm */
443 : rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
444 : rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
445 : rdev->pm.pm_method = PM_METHOD_PROFILE;
446 : mutex_unlock(&rdev->pm.mutex);
447 : cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
448 : } else {
449 : count = -EINVAL;
450 : goto fail;
451 : }
452 : radeon_pm_compute_clocks(rdev);
453 : fail:
454 : return count;
455 : }
456 :
/* sysfs show: user-requested dpm state (battery/balanced/performance). */
457 : static ssize_t radeon_get_dpm_state(struct device *dev,
458 : struct device_attribute *attr,
459 : char *buf)
460 : {
461 : struct drm_device *ddev = dev_get_drvdata(dev);
462 : struct radeon_device *rdev = ddev->dev_private;
463 : enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
464 :
465 : return snprintf(buf, PAGE_SIZE, "%s\n",
466 : (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
467 : (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
468 : }
469 :
/* sysfs store: set dpm.user_state and recompute the clocks. */
470 : static ssize_t radeon_set_dpm_state(struct device *dev,
471 : struct device_attribute *attr,
472 : const char *buf,
473 : size_t count)
474 : {
475 : struct drm_device *ddev = dev_get_drvdata(dev);
476 : struct radeon_device *rdev = ddev->dev_private;
477 :
478 : mutex_lock(&rdev->pm.mutex);
479 : if (strncmp("battery", buf, strlen("battery")) == 0)
480 : rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
481 : else if (strncmp("balanced", buf, strlen("balanced")) == 0)
482 : rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
483 : else if (strncmp("performance", buf, strlen("performance")) == 0)
484 : rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
485 : else {
486 : mutex_unlock(&rdev->pm.mutex);
487 : count = -EINVAL;
488 : goto fail;
489 : }
490 : mutex_unlock(&rdev->pm.mutex);
491 :
492 : /* Can't set dpm state when the card is off */
493 : #ifdef notyet
494 : if (!(rdev->flags & RADEON_IS_PX) ||
495 : (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
496 : #endif
497 : radeon_pm_compute_clocks(rdev);
498 :
499 : fail:
500 : return count;
501 : }
502 :
/* sysfs show: forced dpm performance level ("auto"/"low"/"high"). */
503 : static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
504 : struct device_attribute *attr,
505 : char *buf)
506 : {
507 : struct drm_device *ddev = dev_get_drvdata(dev);
508 : struct radeon_device *rdev = ddev->dev_private;
509 : enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
510 :
511 : #ifdef notyet
512 : if ((rdev->flags & RADEON_IS_PX) &&
513 : (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
514 : return snprintf(buf, PAGE_SIZE, "off\n");
515 : #endif
516 :
517 : return snprintf(buf, PAGE_SIZE, "%s\n",
518 : (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
519 : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
520 : }
521 :
/*
 * sysfs store: force a dpm performance level; refused while a thermal
 * event is active or if the asic hook reports failure.
 */
522 : static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
523 : struct device_attribute *attr,
524 : const char *buf,
525 : size_t count)
526 : {
527 : struct drm_device *ddev = dev_get_drvdata(dev);
528 : struct radeon_device *rdev = ddev->dev_private;
529 : enum radeon_dpm_forced_level level;
530 : int ret = 0;
531 :
532 : /* Can't force performance level when the card is off */
533 : #ifdef notyet
534 : if ((rdev->flags & RADEON_IS_PX) &&
535 : (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
536 : return -EINVAL;
537 : #endif
538 :
539 : mutex_lock(&rdev->pm.mutex);
540 : if (strncmp("low", buf, strlen("low")) == 0) {
541 : level = RADEON_DPM_FORCED_LEVEL_LOW;
542 : } else if (strncmp("high", buf, strlen("high")) == 0) {
543 : level = RADEON_DPM_FORCED_LEVEL_HIGH;
544 : } else if (strncmp("auto", buf, strlen("auto")) == 0) {
545 : level = RADEON_DPM_FORCED_LEVEL_AUTO;
546 : } else {
547 : count = -EINVAL;
548 : goto fail;
549 : }
550 : if (rdev->asic->dpm.force_performance_level) {
551 : if (rdev->pm.dpm.thermal_active) {
552 : count = -EINVAL;
553 : goto fail;
554 : }
555 : ret = radeon_dpm_force_performance_level(rdev, level);
556 : if (ret)
557 : count = -EINVAL;
558 : }
559 : fail:
560 : mutex_unlock(&rdev->pm.mutex);
561 :
562 : return count;
563 : }
564 : #endif
565 :
/*
 * hwmon sysfs interface — GPU temperature and fan control attributes
 * (temp1_*, pwm1*).  Compiled out on this platform ("notyet"); kept in
 * sync with the Linux driver for a future port.
 */
566 : #ifdef notyet
/* hwmon show pwm1_enable: 1 = manual (static) fan mode, 2 = auto/smc. */
567 : static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
568 : struct device_attribute *attr,
569 : char *buf)
570 : {
571 : struct radeon_device *rdev = dev_get_drvdata(dev);
572 : u32 pwm_mode = 0;
573 :
574 : if (rdev->asic->dpm.fan_ctrl_get_mode)
575 : pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
576 :
577 : /* never 0 (full-speed), fuse or smc-controlled always */
578 : return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
579 : }
580 :
/* hwmon store pwm1_enable: 1 selects manual mode, anything else auto. */
581 : static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
582 : struct device_attribute *attr,
583 : const char *buf,
584 : size_t count)
585 : {
586 : struct radeon_device *rdev = dev_get_drvdata(dev);
587 : int err;
588 : int value;
589 :
590 : if(!rdev->asic->dpm.fan_ctrl_set_mode)
591 : return -EINVAL;
592 :
593 : err = kstrtoint(buf, 10, &value);
594 : if (err)
595 : return err;
596 :
597 : switch (value) {
598 : case 1: /* manual, percent-based */
599 : rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
600 : break;
601 : default: /* disable */
602 : rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
603 : break;
604 : }
605 :
606 : return count;
607 : }
608 :
/* hwmon show pwm1_min: fixed minimum PWM duty (0). */
609 : static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
610 : struct device_attribute *attr,
611 : char *buf)
612 : {
613 : return sprintf(buf, "%i\n", 0);
614 : }
615 :
/* hwmon show pwm1_max: fixed maximum PWM duty (255). */
616 : static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
617 : struct device_attribute *attr,
618 : char *buf)
619 : {
620 : return sprintf(buf, "%i\n", 255);
621 : }
622 :
/* hwmon store pwm1: converts 0-255 duty to a percent and applies it. */
623 : static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
624 : struct device_attribute *attr,
625 : const char *buf, size_t count)
626 : {
627 : struct radeon_device *rdev = dev_get_drvdata(dev);
628 : int err;
629 : u32 value;
630 :
631 : err = kstrtou32(buf, 10, &value);
632 : if (err)
633 : return err;
634 :
635 : value = (value * 100) / 255;
636 :
637 : err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
638 : if (err)
639 : return err;
640 :
641 : return count;
642 : }
643 :
/* hwmon show pwm1: reads fan speed percent and scales it to 0-255. */
644 : static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
645 : struct device_attribute *attr,
646 : char *buf)
647 : {
648 : struct radeon_device *rdev = dev_get_drvdata(dev);
649 : int err;
650 : u32 speed;
651 :
652 : err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
653 : if (err)
654 : return err;
655 :
656 : speed = (speed * 255) / 100;
657 :
658 : return sprintf(buf, "%i\n", speed);
659 : }
660 :
661 : static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
662 : static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
663 : static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
664 : static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
665 : radeon_get_dpm_forced_performance_level,
666 : radeon_set_dpm_forced_performance_level);
667 :
/* hwmon show temp1_input: current GPU temperature, 0 if no sensor hook. */
668 : static ssize_t radeon_hwmon_show_temp(struct device *dev,
669 : struct device_attribute *attr,
670 : char *buf)
671 : {
672 : struct radeon_device *rdev = dev_get_drvdata(dev);
673 : struct drm_device *ddev = rdev->ddev;
674 : int temp;
675 :
676 : /* Can't get temperature when the card is off */
677 : if ((rdev->flags & RADEON_IS_PX) &&
678 : (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
679 : return -EINVAL;
680 :
681 : if (rdev->asic->pm.get_temperature)
682 : temp = radeon_get_temperature(rdev);
683 : else
684 : temp = 0;
685 :
686 : return snprintf(buf, PAGE_SIZE, "%d\n", temp);
687 : }
688 :
/* hwmon show temp1_crit / temp1_crit_hyst: dpm thermal max/min (index
 * selects hysteresis, i.e. min_temp). */
689 : static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
690 : struct device_attribute *attr,
691 : char *buf)
692 : {
693 : struct radeon_device *rdev = dev_get_drvdata(dev);
694 : int hyst = to_sensor_dev_attr(attr)->index;
695 : int temp;
696 :
697 : if (hyst)
698 : temp = rdev->pm.dpm.thermal.min_temp;
699 : else
700 : temp = rdev->pm.dpm.thermal.max_temp;
701 :
702 : return snprintf(buf, PAGE_SIZE, "%d\n", temp);
703 : }
704 :
705 : static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
706 : static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
707 : static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
708 : static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
709 : static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
710 : static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
711 : static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
712 :
713 :
714 : static struct attribute *hwmon_attributes[] = {
715 : &sensor_dev_attr_temp1_input.dev_attr.attr,
716 : &sensor_dev_attr_temp1_crit.dev_attr.attr,
717 : &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
718 : &sensor_dev_attr_pwm1.dev_attr.attr,
719 : &sensor_dev_attr_pwm1_enable.dev_attr.attr,
720 : &sensor_dev_attr_pwm1_min.dev_attr.attr,
721 : &sensor_dev_attr_pwm1_max.dev_attr.attr,
722 : NULL
723 : };
724 :
/*
 * is_visible callback: hides or degrades attributes depending on PM
 * method, fan presence and which asic fan hooks are implemented.
 */
725 : static umode_t hwmon_attributes_visible(struct kobject *kobj,
726 : struct attribute *attr, int index)
727 : {
728 : struct device *dev = container_of(kobj, struct device, kobj);
729 : struct radeon_device *rdev = dev_get_drvdata(dev);
730 : umode_t effective_mode = attr->mode;
731 :
732 : /* Skip attributes if DPM is not enabled */
733 : if (rdev->pm.pm_method != PM_METHOD_DPM &&
734 : (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
735 : attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
736 : attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
737 : attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
738 : attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
739 : attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
740 : return 0;
741 :
742 : /* Skip fan attributes if fan is not present */
743 : if (rdev->pm.no_fan &&
744 : (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
745 : attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
746 : attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
747 : attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
748 : return 0;
749 :
750 : /* mask fan attributes if we have no bindings for this asic to expose */
751 : if ((!rdev->asic->dpm.get_fan_speed_percent &&
752 : attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
753 : (!rdev->asic->dpm.fan_ctrl_get_mode &&
754 : attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
755 : effective_mode &= ~S_IRUGO;
756 :
757 : if ((!rdev->asic->dpm.set_fan_speed_percent &&
758 : attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
759 : (!rdev->asic->dpm.fan_ctrl_set_mode &&
760 : attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
761 : effective_mode &= ~S_IWUSR;
762 :
763 : /* hide max/min values if we can't both query and manage the fan */
764 : if ((!rdev->asic->dpm.set_fan_speed_percent &&
765 : !rdev->asic->dpm.get_fan_speed_percent) &&
766 : (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
767 : attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
768 : return 0;
769 :
770 : return effective_mode;
771 : }
772 :
773 : static const struct attribute_group hwmon_attrgroup = {
774 : .attrs = hwmon_attributes,
775 : .is_visible = hwmon_attributes_visible,
776 : };
777 :
778 : static const struct attribute_group *hwmon_groups[] = {
779 : &hwmon_attrgroup,
780 : NULL
781 : };
782 : #endif
783 :
/*
 * radeon_hwmon_init - register a hwmon device for ASICs with an
 * internal thermal sensor.
 *
 * Returns 0 when there is nothing to register (unknown thermal type or
 * no get_temperature hook).  Actual hwmon registration is compiled out
 * ("notyet") on this platform, so this currently always returns 0.
 */
784 0 : static int radeon_hwmon_init(struct radeon_device *rdev)
785 : {
786 : int err = 0;
787 :
788 0 : switch (rdev->pm.int_thermal_type) {
789 : case THERMAL_TYPE_RV6XX:
790 : case THERMAL_TYPE_RV770:
791 : case THERMAL_TYPE_EVERGREEN:
792 : case THERMAL_TYPE_NI:
793 : case THERMAL_TYPE_SUMO:
794 : case THERMAL_TYPE_SI:
795 : case THERMAL_TYPE_CI:
796 : case THERMAL_TYPE_KV:
797 0 : if (rdev->asic->pm.get_temperature == NULL)
798 0 : return err;
799 : #ifdef notyet
800 : rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
801 : "radeon", rdev,
802 : hwmon_groups);
803 : if (IS_ERR(rdev->pm.int_hwmon_dev)) {
804 : err = PTR_ERR(rdev->pm.int_hwmon_dev);
805 : dev_err(rdev->dev,
806 : "Unable to register hwmon device: %d\n", err);
807 : }
808 : #endif
809 : break;
810 : default:
811 : break;
812 : }
813 :
814 0 : return err;
815 0 : }
816 :
/*
 * radeon_hwmon_fini - unregister the hwmon device registered by
 * radeon_hwmon_init; currently a no-op ("notyet") on this platform.
 */
817 0 : static void radeon_hwmon_fini(struct radeon_device *rdev)
818 : {
819 : #ifdef notyet
820 : if (rdev->pm.int_hwmon_dev)
821 : hwmon_device_unregister(rdev->pm.int_hwmon_dev);
822 : #endif
823 0 : }
824 :
/*
 * radeon_dpm_thermal_work_handler - worker scheduled on a thermal
 * interrupt.
 *
 * While the temperature is at or above thermal.min_temp (or, lacking a
 * temperature hook, while the last interrupt direction was low-to-high)
 * the internal THERMAL state is selected and thermal_active set;
 * otherwise the user state is restored.  Finishes by recomputing the
 * clocks so the state change takes effect.
 */
825 0 : static void radeon_dpm_thermal_work_handler(struct work_struct *work)
826 : {
827 : struct radeon_device *rdev =
828 0 : container_of(work, struct radeon_device,
829 : pm.dpm.thermal.work);
830 : /* switch to the thermal state */
831 : enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
832 :
833 0 : if (!rdev->pm.dpm_enabled)
834 0 : return;
835 :
836 0 : if (rdev->asic->pm.get_temperature) {
837 0 : int temp = radeon_get_temperature(rdev);
838 :
839 0 : if (temp < rdev->pm.dpm.thermal.min_temp)
840 : /* switch back the user state */
841 0 : dpm_state = rdev->pm.dpm.user_state;
842 0 : } else {
843 0 : if (rdev->pm.dpm.thermal.high_to_low)
844 : /* switch back the user state */
845 0 : dpm_state = rdev->pm.dpm.user_state;
846 : }
847 0 : mutex_lock(&rdev->pm.mutex);
848 0 : if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
849 0 : rdev->pm.dpm.thermal_active = true;
850 : else
851 0 : rdev->pm.dpm.thermal_active = false;
852 0 : rdev->pm.dpm.state = dpm_state;
853 0 : mutex_unlock(&rdev->pm.mutex);
854 :
855 0 : radeon_pm_compute_clocks(rdev);
856 0 : }
857 :
858 0 : static bool radeon_dpm_single_display(struct radeon_device *rdev)
859 : {
860 0 : bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
861 : true : false;
862 :
863 : /* check if the vblank period is too short to adjust the mclk */
864 0 : if (single_display && rdev->asic->dpm.vblank_too_short) {
865 0 : if (radeon_dpm_vblank_too_short(rdev))
866 0 : single_display = false;
867 : }
868 :
869 : /* 120hz tends to be problematic even if they are under the
870 : * vblank limit.
871 : */
872 0 : if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
873 0 : single_display = false;
874 :
875 0 : return single_display;
876 : }
877 :
/*
 * radeon_dpm_pick_power_state - choose the best radeon_ps for the
 * requested dpm state.
 *
 * User states (battery/balanced/performance) are matched through the
 * ATOM UI classification, skipping SINGLE_DISPLAY_ONLY states when
 * multiple displays are active.  PERFORMANCE first tries the older
 * separate 3DPERF state; BALANCED is remapped to PERFORMANCE since no
 * balanced states exist.  If no state matches, the fallback chain at
 * the bottom degrades the request (e.g. UVD_SD -> UVD_HD -> uvd_ps or
 * PERFORMANCE; THERMAL -> ACPI -> BATTERY -> PERFORMANCE) and the
 * search restarts.  Returns NULL only when every fallback fails.
 */
878 0 : static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
879 : enum radeon_pm_state_type dpm_state)
880 : {
881 : int i;
882 : struct radeon_ps *ps;
883 : u32 ui_class;
884 0 : bool single_display = radeon_dpm_single_display(rdev);
885 :
886 : /* certain older asics have a separare 3D performance state,
887 : * so try that first if the user selected performance
888 : */
889 0 : if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
890 0 : dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
891 : /* balanced states don't exist at the moment */
892 0 : if (dpm_state == POWER_STATE_TYPE_BALANCED)
893 0 : dpm_state = POWER_STATE_TYPE_PERFORMANCE;
894 :
895 : restart_search:
896 : /* Pick the best power state based on current conditions */
897 0 : for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
898 0 : ps = &rdev->pm.dpm.ps[i];
899 0 : ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
900 0 : switch (dpm_state) {
901 : /* user states */
902 : case POWER_STATE_TYPE_BATTERY:
903 0 : if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
904 0 : if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
905 0 : if (single_display)
906 0 : return ps;
907 : } else
908 0 : return ps;
909 : }
910 : break;
911 : case POWER_STATE_TYPE_BALANCED:
912 0 : if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
913 0 : if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
914 0 : if (single_display)
915 0 : return ps;
916 : } else
917 0 : return ps;
918 : }
919 : break;
920 : case POWER_STATE_TYPE_PERFORMANCE:
921 0 : if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
922 0 : if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
923 0 : if (single_display)
924 0 : return ps;
925 : } else
926 0 : return ps;
927 : }
928 : break;
929 : /* internal states */
930 : case POWER_STATE_TYPE_INTERNAL_UVD:
931 0 : if (rdev->pm.dpm.uvd_ps)
932 0 : return rdev->pm.dpm.uvd_ps;
933 : else
934 : break;
935 : case POWER_STATE_TYPE_INTERNAL_UVD_SD:
936 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
937 0 : return ps;
938 : break;
939 : case POWER_STATE_TYPE_INTERNAL_UVD_HD:
940 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
941 0 : return ps;
942 : break;
943 : case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
944 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
945 0 : return ps;
946 : break;
947 : case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
948 0 : if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
949 0 : return ps;
950 : break;
951 : case POWER_STATE_TYPE_INTERNAL_BOOT:
952 0 : return rdev->pm.dpm.boot_ps;
953 : case POWER_STATE_TYPE_INTERNAL_THERMAL:
954 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
955 0 : return ps;
956 : break;
957 : case POWER_STATE_TYPE_INTERNAL_ACPI:
958 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
959 0 : return ps;
960 : break;
961 : case POWER_STATE_TYPE_INTERNAL_ULV:
962 0 : if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
963 0 : return ps;
964 : break;
965 : case POWER_STATE_TYPE_INTERNAL_3DPERF:
966 0 : if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
967 0 : return ps;
968 : break;
969 : default:
970 : break;
971 : }
972 : }
973 : /* use a fallback state if we didn't match */
974 0 : switch (dpm_state) {
975 : case POWER_STATE_TYPE_INTERNAL_UVD_SD:
976 : dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
977 0 : goto restart_search;
978 : case POWER_STATE_TYPE_INTERNAL_UVD_HD:
979 : case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
980 : case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
981 0 : if (rdev->pm.dpm.uvd_ps) {
982 0 : return rdev->pm.dpm.uvd_ps;
983 : } else {
984 : dpm_state = POWER_STATE_TYPE_PERFORMANCE;
985 0 : goto restart_search;
986 : }
987 : case POWER_STATE_TYPE_INTERNAL_THERMAL:
988 : dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
989 0 : goto restart_search;
990 : case POWER_STATE_TYPE_INTERNAL_ACPI:
991 : dpm_state = POWER_STATE_TYPE_BATTERY;
992 0 : goto restart_search;
993 : case POWER_STATE_TYPE_BATTERY:
994 : case POWER_STATE_TYPE_BALANCED:
995 : case POWER_STATE_TYPE_INTERNAL_3DPERF:
996 : dpm_state = POWER_STATE_TYPE_PERFORMANCE;
997 0 : goto restart_search;
998 : default:
999 : break;
1000 : }
1001 :
1002 0 : return NULL;
1003 0 : }
1004 :
/*
 * radeon_dpm_change_power_state_locked - pick and program a new dpm power state
 *
 * Re-evaluates the desired power state for current conditions (user state,
 * thermal/uvd overrides, display count) and, when it differs from the
 * current one or a change is forced (vce activity or single/multi display
 * change), drains the rings and reprograms the hardware.
 * Caller must hold rdev->pm.mutex (hence "_locked").
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	/* verbose state dump when dpm was explicitly requested on the cmdline */
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	/* block mclk users and ring submission while reclocking */
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}
1134 :
/*
 * radeon_dpm_enable_uvd - adjust dpm for UVD (video decode) activity
 *
 * On asics with UVD powergating, power the block up/down directly; streams
 * that are merely paused (sd/hd counts > 0) keep it powered.  On other
 * asics, switch the dpm state machine into/out of the UVD state and
 * recompute clocks.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
1176 :
1177 0 : void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1178 : {
1179 0 : if (enable) {
1180 : mutex_lock(&rdev->pm.mutex);
1181 0 : rdev->pm.dpm.vce_active = true;
1182 : /* XXX select vce level based on ring/task */
1183 0 : rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1184 0 : mutex_unlock(&rdev->pm.mutex);
1185 0 : } else {
1186 : mutex_lock(&rdev->pm.mutex);
1187 0 : rdev->pm.dpm.vce_active = false;
1188 0 : mutex_unlock(&rdev->pm.mutex);
1189 : }
1190 :
1191 0 : radeon_pm_compute_clocks(rdev);
1192 0 : }
1193 :
1194 0 : static void radeon_pm_suspend_old(struct radeon_device *rdev)
1195 : {
1196 0 : mutex_lock(&rdev->pm.mutex);
1197 0 : if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1198 0 : if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1199 0 : rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1200 : }
1201 0 : mutex_unlock(&rdev->pm.mutex);
1202 :
1203 0 : cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1204 0 : }
1205 :
/*
 * radeon_pm_suspend_dpm - dpm suspend path
 *
 * Disables dpm and resets the tracked power state to the boot state so
 * the asic comes back in a known configuration on resume.
 */
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}
1216 :
1217 0 : void radeon_pm_suspend(struct radeon_device *rdev)
1218 : {
1219 0 : if (rdev->pm.pm_method == PM_METHOD_DPM)
1220 0 : radeon_pm_suspend_dpm(rdev);
1221 : else
1222 0 : radeon_pm_suspend_old(rdev);
1223 0 : }
1224 :
/*
 * radeon_pm_resume_old - non-dpm resume path
 *
 * Restores default voltages/clocks (BTC-CAYMAN with MC ucode only),
 * resets the tracked power state (asic init reverts it to the default),
 * restarts a suspended dynpm state machine and recomputes clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	/* restart dynpm if it was active before suspend */
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
1261 :
/*
 * radeon_pm_resume_dpm - dpm resume path
 *
 * Re-enables dpm after asic init reset the hardware to the boot state.
 * On failure, falls back to programming the default voltages/clocks
 * (BTC-CAYMAN with MC ucode only) and leaves dpm disabled.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* restore sane default clocks so the asic is still usable */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1294 :
1295 0 : void radeon_pm_resume(struct radeon_device *rdev)
1296 : {
1297 0 : if (rdev->pm.pm_method == PM_METHOD_DPM)
1298 0 : radeon_pm_resume_dpm(rdev);
1299 : else
1300 0 : radeon_pm_resume_old(rdev);
1301 0 : }
1302 :
/*
 * radeon_pm_init_old - initialize legacy (profile/dynpm) power management
 *
 * Parses power states from the BIOS, programs default clocks where the MC
 * ucode permits, sets up the thermal sensor and the dynpm idle work.
 * Returns 0 on success, or the error from radeon_hwmon_init().
 */
static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	/* start from a known default pm configuration */
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}
1359 :
1360 0 : static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1361 : {
1362 : int i;
1363 :
1364 0 : for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1365 0 : printk("== power state %d ==\n", i);
1366 0 : radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1367 : }
1368 0 : }
1369 :
/*
 * radeon_pm_init_dpm - initialize dpm power management
 *
 * Parses power states from the ATOM BIOS (required; combios asics cannot
 * use dpm), sets up the thermal sensor and worker, then enables dpm.
 * On failure, restores default voltages/clocks (BTC-CAYMAN with MC ucode
 * only) and returns the error.  Returns 0 on success, -EINVAL without an
 * ATOM BIOS, or the error from hwmon init / dpm enable.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* restore sane default clocks so the asic is still usable */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1434 :
/* PCI IDs (chip and subsystem) identifying a board that needs a dpm quirk */
struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};

/* cards with dpm stability problems */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },	/* terminator */
};
1450 :
/*
 * radeon_pm_init - initialize power management
 *
 * Picks the pm method for this asic: dpm for rv6xx+ when the required
 * firmware is present and neither a quirk nor the radeon_dpm module
 * parameter disables it, otherwise the legacy profile method.  Then
 * runs the matching init path.  Returns 0 on success, error on failure.
 */
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}
1541 :
/*
 * radeon_pm_late_init - finish pm setup once the rest of the driver is up
 *
 * For dpm: creates the sysfs files (Linux only), runs the late-enable
 * step and, on success, computes the initial clocks; on failure dpm is
 * disabled.  For legacy pm: only the sysfs files (Linux only).
 * Returns 0 on success or the error from the late-enable step.
 */
int radeon_pm_late_init(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		if (rdev->pm.dpm_enabled) {
#ifdef __linux__
			if (!rdev->pm.sysfs_initialized) {
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				/* XXX: these are noops for dpm but are here for backwards compat */
				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
				if (ret)
					DRM_ERROR("failed to create device file for power profile\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_method);
				if (ret)
					DRM_ERROR("failed to create device file for power method\n");
				rdev->pm.sysfs_initialized = true;
			}
#endif

			mutex_lock(&rdev->pm.mutex);
			ret = radeon_dpm_late_enable(rdev);
			mutex_unlock(&rdev->pm.mutex);
			if (ret) {
				rdev->pm.dpm_enabled = false;
				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
			} else {
				/* set the dpm state for PX since there won't be
				 * a modeset to call this.
				 */
				radeon_pm_compute_clocks(rdev);
			}
		}
	} else {
#ifdef __linux__
		if ((rdev->pm.num_power_states > 1) &&
		    (!rdev->pm.sysfs_initialized)) {
			/* where's the best place to put these? */
			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
			if (ret)
				DRM_ERROR("failed to create device file for power profile\n");
			ret = device_create_file(rdev->dev, &dev_attr_power_method);
			if (ret)
				DRM_ERROR("failed to create device file for power method\n");
			if (!ret)
				rdev->pm.sysfs_initialized = true;
		}
#endif
	}
	return ret;
}
1598 :
/*
 * radeon_pm_fini_old - tear down legacy (profile/dynpm) power management
 *
 * Restores default clocks, stops the dynpm idle work, removes the sysfs
 * files (Linux only) and frees the parsed power-state table.
 */
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

#ifdef __linux__
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
1626 :
/*
 * radeon_pm_fini_dpm - tear down dpm power management
 *
 * Disables dpm, removes the sysfs files (Linux only), runs the asic dpm
 * teardown and frees the parsed power-state table.
 */
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);

#ifdef __linux__
		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}
1647 :
1648 0 : void radeon_pm_fini(struct radeon_device *rdev)
1649 : {
1650 0 : if (rdev->pm.pm_method == PM_METHOD_DPM)
1651 0 : radeon_pm_fini_dpm(rdev);
1652 : else
1653 0 : radeon_pm_fini_old(rdev);
1654 0 : }
1655 :
/*
 * radeon_pm_compute_clocks_old - legacy clock re-evaluation
 *
 * Recounts active crtcs and then either re-applies the current profile
 * or drives the dynpm state machine: pause dynpm with >1 active display
 * (reclocking would flicker), run it with exactly one, and drop to the
 * minimum state with none.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* recount the active crtcs */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multi-head: park dynpm on the default state */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
1728 :
/*
 * radeon_pm_compute_clocks_dpm - dpm clock re-evaluation
 *
 * Recounts active crtcs, refreshes the AC/battery status and asks the
 * dpm state machine to pick and program a new power state if needed.
 */
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);

}
1765 :
1766 0 : void radeon_pm_compute_clocks(struct radeon_device *rdev)
1767 : {
1768 0 : if (rdev->pm.pm_method == PM_METHOD_DPM)
1769 0 : radeon_pm_compute_clocks_dpm(rdev);
1770 : else
1771 0 : radeon_pm_compute_clocks_old(rdev);
1772 0 : }
1773 :
1774 0 : static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1775 : {
1776 0 : int crtc, vpos, hpos, vbl_status;
1777 : bool in_vbl = true;
1778 :
1779 : /* Iterate over all active crtc's. All crtc's must be in vblank,
1780 : * otherwise return in_vbl == false.
1781 : */
1782 0 : for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1783 0 : if (rdev->pm.active_crtcs & (1 << crtc)) {
1784 0 : vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1785 : crtc,
1786 : USE_REAL_VBLANKSTART,
1787 : &vpos, &hpos, NULL, NULL,
1788 0 : &rdev->mode_info.crtcs[crtc]->base.hwmode);
1789 0 : if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1790 0 : !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1791 0 : in_vbl = false;
1792 : }
1793 : }
1794 :
1795 0 : return in_vbl;
1796 0 : }
1797 :
/*
 * radeon_pm_debug_check_in_vbl - vblank check with debug logging
 *
 * Same result as radeon_pm_in_vbl(), but logs (debug builds only) when
 * the reclock is happening outside vblank; @finish distinguishes the
 * entry and exit checks in the log.
 */
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
#ifdef DRMDEBUG
	u32 stat_crtc = 0;
#endif
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}
1810 :
/*
 * radeon_dynpm_idle_work_handler - periodic dynpm up/downclock decision
 *
 * Runs every RADEON_IDLE_LOOP_MS while dynpm is active: counts fences
 * still pending on the rings to estimate GPU load, plans an upclock
 * (>= 3 pending) or downclock (none pending) after a
 * RADEON_RECLOCK_DELAY_MS debounce, applies any due action, and
 * reschedules itself.
 */
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
			    pm.dynpm_idle_work.work);

	/* keep ttm's delayed work out of the way while we reclock */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		/* estimate load: count emitted-but-unsignaled fences */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
1871 :
1872 : /*
1873 : * Debugfs info
1874 : */
1875 : #if defined(CONFIG_DEBUG_FS)
1876 :
/*
 * radeon_debugfs_pm_info - debugfs dump of the current power state.
 *
 * Three cases, in priority order:
 *   1. A PX (PowerXpress) dGPU that is currently switched off: report
 *      that and touch no hardware.
 *   2. dpm enabled: defer to the asic-specific performance-level
 *      printer under pm.mutex, if the asic provides one.
 *   3. Legacy pm: print engine/memory clocks, voltage and PCIE lanes.
 *      The clock fields appear to be stored in 10 kHz units — the
 *      "%u0" format appends a literal '0' to display kHz.
 *
 * Always returns 0.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	/* Powered-off PX asic: register reads would be invalid. */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&rdev->pm.mutex);
	} else {
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1912 :
/* Table of debugfs entries registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1916 : #endif
1917 :
/*
 * Register the pm debugfs files for this device; a no-op returning 0
 * when debugfs support is compiled out.  With CONFIG_DEBUG_FS, returns
 * whatever radeon_debugfs_add_files() returns.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list,
					ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}
|