Line data Source code
1 : /*
2 : * Copyright 2013 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : */
23 :
24 : #include <dev/pci/drm/drmP.h>
25 : #include "radeon.h"
26 : #include "cikd.h"
27 : #include "r600_dpm.h"
28 : #include "kv_dpm.h"
29 : #include "radeon_asic.h"
30 :
31 : #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
32 : #define KV_MINIMUM_ENGINE_CLOCK 800
33 : #define SMC_RAM_END 0x40000
34 :
35 : static int kv_enable_nb_dpm(struct radeon_device *rdev,
36 : bool enable);
37 : static void kv_init_graphics_levels(struct radeon_device *rdev);
38 : static int kv_calculate_ds_divider(struct radeon_device *rdev);
39 : static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
40 : static int kv_calculate_dpm_settings(struct radeon_device *rdev);
41 : static void kv_enable_new_levels(struct radeon_device *rdev);
42 : static void kv_program_nbps_index_settings(struct radeon_device *rdev,
43 : struct radeon_ps *new_rps);
44 : static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
45 : static int kv_set_enabled_levels(struct radeon_device *rdev);
46 : static int kv_force_dpm_highest(struct radeon_device *rdev);
47 : static int kv_force_dpm_lowest(struct radeon_device *rdev);
48 : static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
49 : struct radeon_ps *new_rps,
50 : struct radeon_ps *old_rps);
51 : static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
52 : int min_temp, int max_temp);
53 : static int kv_init_fps_limits(struct radeon_device *rdev);
54 :
55 : void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
56 : static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
57 : static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
58 : static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
59 :
60 : extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
61 : extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
62 : extern void cik_update_cg(struct radeon_device *rdev,
63 : u32 block, bool enable);
64 :
/*
 * Local CAC (capacitance/leakage monitoring) configuration tables.
 * Each row is a struct kv_lcac_config_values; per the (currently
 * compiled-out) kv_program_local_cac_table() these are
 * { block_id, signal_id, t }, where signal_id is used as the number of
 * signals programmed for that block.  A block_id of 0xffffffff
 * terminates the table.
 */
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};
133 :
/*
 * Per-block local CAC control register descriptors (struct
 * kv_lcac_config_reg).  The first field is the SMC register address
 * written via WREG32_SMC(local_cac_reg->cntl, ...); the remaining
 * values are mask/shift pairs for the block, signal, t and enable
 * fields — exact field order is defined by the struct in kv_dpm.h
 * (NOTE(review): order assumed from the initializers; verify there).
 */
static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
163 :
/*
 * DIDT (di/dt throttling) register programming table, consumed by
 * kv_program_pt_config_registers().  Each row is a struct
 * kv_pt_config_reg: { offset, mask, shift, value, type }; the
 * KV_CONFIGREG_DIDT_IND type selects indirect DIDT register access
 * (RREG32_DIDT/WREG32_DIDT).  An offset of 0xFFFFFFFF ends the table.
 * The rows repeat the same per-block pattern at offsets 0x00/0x20/
 * 0x40/0x60 — one group per DIDT block (SQ/DB/TD/TCP per
 * kv_do_enable_didt(); TODO confirm block-to-offset mapping in cikd.h).
 */
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
240 :
241 0 : static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
242 : {
243 0 : struct kv_ps *ps = rps->ps_priv;
244 :
245 0 : return ps;
246 : }
247 :
248 0 : static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
249 : {
250 0 : struct kv_power_info *pi = rdev->pm.dpm.priv;
251 :
252 0 : return pi;
253 : }
254 :
#if 0
/*
 * kv_program_local_cac_table - program one block's local CAC signals.
 *
 * For every table row (terminated by block_id == 0xffffffff) writes
 * signal_id entries to the block's control register: each write packs
 * the block id, the signal index, the t value and the enable bit using
 * the mask/shift descriptors in @local_cac_reg.
 *
 * Currently compiled out (unused).
 */
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		/* signal_id doubles as the per-block signal count */
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
280 :
/*
 * kv_program_pt_config_registers - apply a power-tune register table.
 *
 * Walks @cac_config_regs until an offset of 0xFFFFFFFF.  Entries of
 * type KV_CONFIGREG_CACHE do not touch hardware; they accumulate
 * (value << shift) & mask bits into a cache word that is OR'd into the
 * next real register write.  All other entries perform a
 * read-modify-write through the access method selected by their type:
 * SMC-indirect, DIDT-indirect, or direct MMIO (offset is a dword index,
 * hence the << 2).
 *
 * Returns 0 on success, -EINVAL if @cac_config_regs is NULL.
 */
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			/* defer these bits until the next register write */
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				/* direct MMIO: offset is a dword index */
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			/* merge any bits cached by preceding CACHE entries */
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
329 :
330 0 : static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
331 : {
332 0 : struct kv_power_info *pi = kv_get_pi(rdev);
333 : u32 data;
334 :
335 0 : if (pi->caps_sq_ramping) {
336 0 : data = RREG32_DIDT(DIDT_SQ_CTRL0);
337 0 : if (enable)
338 0 : data |= DIDT_CTRL_EN;
339 : else
340 0 : data &= ~DIDT_CTRL_EN;
341 0 : WREG32_DIDT(DIDT_SQ_CTRL0, data);
342 0 : }
343 :
344 0 : if (pi->caps_db_ramping) {
345 0 : data = RREG32_DIDT(DIDT_DB_CTRL0);
346 0 : if (enable)
347 0 : data |= DIDT_CTRL_EN;
348 : else
349 0 : data &= ~DIDT_CTRL_EN;
350 0 : WREG32_DIDT(DIDT_DB_CTRL0, data);
351 0 : }
352 :
353 0 : if (pi->caps_td_ramping) {
354 0 : data = RREG32_DIDT(DIDT_TD_CTRL0);
355 0 : if (enable)
356 0 : data |= DIDT_CTRL_EN;
357 : else
358 0 : data &= ~DIDT_CTRL_EN;
359 0 : WREG32_DIDT(DIDT_TD_CTRL0, data);
360 0 : }
361 :
362 0 : if (pi->caps_tcp_ramping) {
363 0 : data = RREG32_DIDT(DIDT_TCP_CTRL0);
364 0 : if (enable)
365 0 : data |= DIDT_CTRL_EN;
366 : else
367 0 : data &= ~DIDT_CTRL_EN;
368 0 : WREG32_DIDT(DIDT_TCP_CTRL0, data);
369 0 : }
370 0 : }
371 :
/*
 * kv_enable_didt - enable/disable di/dt power throttling.
 *
 * Acts only when at least one SQ/DB/TD/TCP ramping capability is set.
 * The whole sequence runs with the RLC in safe mode
 * (cik_enter/exit_rlc_safe_mode); on enable, the didt_config_kv
 * register table is programmed before toggling the enable bits.
 *
 * Returns 0 on success, or the error from programming the DIDT table.
 */
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				/* leave safe mode before bailing out */
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}
398 :
#if 0
/*
 * kv_initialize_hardware_cac_manager - program all local CAC tables
 * (SX0, MC0-MC3, CPL) when the CAC capability is present.  Each block's
 * override select/value registers are zeroed before its table is
 * programmed.  Currently compiled out (unused).
 */
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif
431 :
432 0 : static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
433 : {
434 0 : struct kv_power_info *pi = kv_get_pi(rdev);
435 : int ret = 0;
436 :
437 0 : if (pi->caps_cac) {
438 0 : if (enable) {
439 0 : ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
440 0 : if (ret)
441 0 : pi->cac_enabled = false;
442 : else
443 0 : pi->cac_enabled = true;
444 0 : } else if (pi->cac_enabled) {
445 0 : kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
446 0 : pi->cac_enabled = false;
447 0 : }
448 : }
449 :
450 0 : return ret;
451 : }
452 :
453 0 : static int kv_process_firmware_header(struct radeon_device *rdev)
454 : {
455 0 : struct kv_power_info *pi = kv_get_pi(rdev);
456 0 : u32 tmp;
457 : int ret;
458 :
459 0 : ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
460 : offsetof(SMU7_Firmware_Header, DpmTable),
461 0 : &tmp, pi->sram_end);
462 :
463 0 : if (ret == 0)
464 0 : pi->dpm_table_start = tmp;
465 :
466 0 : ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
467 : offsetof(SMU7_Firmware_Header, SoftRegisters),
468 0 : &tmp, pi->sram_end);
469 :
470 0 : if (ret == 0)
471 0 : pi->soft_regs_start = tmp;
472 :
473 0 : return ret;
474 0 : }
475 :
476 0 : static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
477 : {
478 0 : struct kv_power_info *pi = kv_get_pi(rdev);
479 : int ret;
480 :
481 0 : pi->graphics_voltage_change_enable = 1;
482 :
483 0 : ret = kv_copy_bytes_to_smc(rdev,
484 0 : pi->dpm_table_start +
485 : offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
486 : &pi->graphics_voltage_change_enable,
487 0 : sizeof(u8), pi->sram_end);
488 :
489 0 : return ret;
490 : }
491 :
492 0 : static int kv_set_dpm_interval(struct radeon_device *rdev)
493 : {
494 0 : struct kv_power_info *pi = kv_get_pi(rdev);
495 : int ret;
496 :
497 0 : pi->graphics_interval = 1;
498 :
499 0 : ret = kv_copy_bytes_to_smc(rdev,
500 0 : pi->dpm_table_start +
501 : offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
502 : &pi->graphics_interval,
503 0 : sizeof(u8), pi->sram_end);
504 :
505 0 : return ret;
506 : }
507 :
508 0 : static int kv_set_dpm_boot_state(struct radeon_device *rdev)
509 : {
510 0 : struct kv_power_info *pi = kv_get_pi(rdev);
511 : int ret;
512 :
513 0 : ret = kv_copy_bytes_to_smc(rdev,
514 0 : pi->dpm_table_start +
515 : offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
516 0 : &pi->graphics_boot_level,
517 0 : sizeof(u8), pi->sram_end);
518 :
519 0 : return ret;
520 : }
521 :
/*
 * Program voltage control: write 0x3FFFC100 to CG_FTV_0.  Magic value
 * from AMD reference code — bit meaning not documented here.
 */
static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}
526 :
/* Clear voltage control: zero CG_FTV_0 (undoes kv_program_vc). */
static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}
531 :
532 0 : static int kv_set_divider_value(struct radeon_device *rdev,
533 : u32 index, u32 sclk)
534 : {
535 0 : struct kv_power_info *pi = kv_get_pi(rdev);
536 0 : struct atom_clock_dividers dividers;
537 : int ret;
538 :
539 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
540 : sclk, false, ÷rs);
541 0 : if (ret)
542 0 : return ret;
543 :
544 0 : pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
545 0 : pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
546 :
547 0 : return 0;
548 0 : }
549 :
550 0 : static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
551 : struct sumo_vid_mapping_table *vid_mapping_table,
552 : u32 vid_2bit)
553 : {
554 : struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
555 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
556 : u32 i;
557 :
558 0 : if (vddc_sclk_table && vddc_sclk_table->count) {
559 0 : if (vid_2bit < vddc_sclk_table->count)
560 0 : return vddc_sclk_table->entries[vid_2bit].v;
561 : else
562 0 : return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
563 : } else {
564 0 : for (i = 0; i < vid_mapping_table->num_entries; i++) {
565 0 : if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
566 0 : return vid_mapping_table->entries[i].vid_7bit;
567 : }
568 0 : return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
569 : }
570 0 : }
571 :
572 0 : static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
573 : struct sumo_vid_mapping_table *vid_mapping_table,
574 : u32 vid_7bit)
575 : {
576 : struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
577 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
578 : u32 i;
579 :
580 0 : if (vddc_sclk_table && vddc_sclk_table->count) {
581 0 : for (i = 0; i < vddc_sclk_table->count; i++) {
582 0 : if (vddc_sclk_table->entries[i].v == vid_7bit)
583 0 : return i;
584 : }
585 0 : return vddc_sclk_table->count - 1;
586 : } else {
587 0 : for (i = 0; i < vid_mapping_table->num_entries; i++) {
588 0 : if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
589 0 : return vid_mapping_table->entries[i].vid_2bit;
590 : }
591 :
592 0 : return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
593 : }
594 0 : }
595 :
/*
 * Linear VID-index-to-voltage mapping: 6200 - 25 per step.
 * (Units presumably 0.01 V * 100, i.e. the dpm code's mV*4 scale —
 * TODO confirm against callers.)
 */
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}
601 :
602 0 : static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
603 : u32 vid_2bit)
604 : {
605 0 : struct kv_power_info *pi = kv_get_pi(rdev);
606 0 : u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
607 0 : &pi->sys_info.vid_mapping_table,
608 : vid_2bit);
609 :
610 0 : return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
611 : }
612 :
613 :
614 0 : static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
615 : {
616 0 : struct kv_power_info *pi = kv_get_pi(rdev);
617 :
618 0 : pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
619 0 : pi->graphics_level[index].MinVddNb =
620 0 : cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
621 :
622 0 : return 0;
623 : }
624 :
625 0 : static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
626 : {
627 0 : struct kv_power_info *pi = kv_get_pi(rdev);
628 :
629 0 : pi->graphics_level[index].AT = cpu_to_be16((u16)at);
630 :
631 0 : return 0;
632 : }
633 :
634 0 : static void kv_dpm_power_level_enable(struct radeon_device *rdev,
635 : u32 index, bool enable)
636 : {
637 0 : struct kv_power_info *pi = kv_get_pi(rdev);
638 :
639 0 : pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
640 0 : }
641 :
642 0 : static void kv_start_dpm(struct radeon_device *rdev)
643 : {
644 0 : u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
645 :
646 0 : tmp |= GLOBAL_PWRMGT_EN;
647 0 : WREG32_SMC(GENERAL_PWRMGT, tmp);
648 :
649 0 : kv_smc_dpm_enable(rdev, true);
650 0 : }
651 :
/* Tell the SMC to disable dynamic power management. */
static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}
656 :
657 0 : static void kv_start_am(struct radeon_device *rdev)
658 : {
659 0 : u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
660 :
661 0 : sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
662 0 : sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
663 :
664 0 : WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
665 0 : }
666 :
667 0 : static void kv_reset_am(struct radeon_device *rdev)
668 : {
669 0 : u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
670 :
671 0 : sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
672 :
673 0 : WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
674 0 : }
675 :
676 0 : static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
677 : {
678 0 : return kv_notify_message_to_smu(rdev, freeze ?
679 : PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
680 : }
681 :
/* Thin alias: forcing the lowest valid level is forcing the DPM lowest. */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
686 :
687 0 : static int kv_unforce_levels(struct radeon_device *rdev)
688 : {
689 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
690 0 : return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
691 : else
692 0 : return kv_set_enabled_levels(rdev);
693 0 : }
694 :
695 0 : static int kv_update_sclk_t(struct radeon_device *rdev)
696 : {
697 0 : struct kv_power_info *pi = kv_get_pi(rdev);
698 0 : u32 low_sclk_interrupt_t = 0;
699 : int ret = 0;
700 :
701 0 : if (pi->caps_sclk_throttle_low_notification) {
702 0 : low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
703 :
704 0 : ret = kv_copy_bytes_to_smc(rdev,
705 0 : pi->dpm_table_start +
706 : offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
707 : (u8 *)&low_sclk_interrupt_t,
708 0 : sizeof(u32), pi->sram_end);
709 0 : }
710 0 : return ret;
711 0 : }
712 :
/*
 * kv_program_bootup_state - find the DPM level matching the boot sclk
 * and mark it as the graphics boot level.
 *
 * Searches the vddc-on-sclk dependency table when it has entries,
 * otherwise the sclk/voltage mapping table.  Either way the search
 * scans from the top level downward and falls through to level 0 if no
 * exact sclk match is found; the found level is recorded in
 * pi->graphics_boot_level and enabled for activity.
 *
 * Returns 0 on success, -EINVAL if the fallback mapping table is empty.
 */
static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		/* highest level whose clk matches the boot sclk (or 0) */
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		/* no dependency table: use the sclk/voltage mapping table */
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}
745 :
746 0 : static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
747 : {
748 0 : struct kv_power_info *pi = kv_get_pi(rdev);
749 : int ret;
750 :
751 0 : pi->graphics_therm_throttle_enable = 1;
752 :
753 0 : ret = kv_copy_bytes_to_smc(rdev,
754 0 : pi->dpm_table_start +
755 : offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
756 : &pi->graphics_therm_throttle_enable,
757 0 : sizeof(u8), pi->sram_end);
758 :
759 0 : return ret;
760 : }
761 :
762 0 : static int kv_upload_dpm_settings(struct radeon_device *rdev)
763 : {
764 0 : struct kv_power_info *pi = kv_get_pi(rdev);
765 : int ret;
766 :
767 0 : ret = kv_copy_bytes_to_smc(rdev,
768 0 : pi->dpm_table_start +
769 : offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
770 0 : (u8 *)&pi->graphics_level,
771 : sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
772 0 : pi->sram_end);
773 :
774 0 : if (ret)
775 0 : return ret;
776 :
777 0 : ret = kv_copy_bytes_to_smc(rdev,
778 0 : pi->dpm_table_start +
779 : offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
780 0 : &pi->graphics_dpm_level_count,
781 0 : sizeof(u8), pi->sram_end);
782 :
783 0 : return ret;
784 0 : }
785 :
786 0 : static u32 kv_get_clock_difference(u32 a, u32 b)
787 : {
788 0 : return (a >= b) ? a - b : b - a;
789 : }
790 :
791 0 : static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
792 : {
793 0 : struct kv_power_info *pi = kv_get_pi(rdev);
794 : u32 value;
795 :
796 0 : if (pi->caps_enable_dfs_bypass) {
797 0 : if (kv_get_clock_difference(clk, 40000) < 200)
798 0 : value = 3;
799 0 : else if (kv_get_clock_difference(clk, 30000) < 200)
800 0 : value = 2;
801 0 : else if (kv_get_clock_difference(clk, 20000) < 200)
802 0 : value = 7;
803 0 : else if (kv_get_clock_difference(clk, 15000) < 200)
804 0 : value = 6;
805 0 : else if (kv_get_clock_difference(clk, 10000) < 200)
806 0 : value = 8;
807 : else
808 : value = 0;
809 : } else {
810 : value = 0;
811 : }
812 :
813 0 : return value;
814 : }
815 :
816 0 : static int kv_populate_uvd_table(struct radeon_device *rdev)
817 : {
818 0 : struct kv_power_info *pi = kv_get_pi(rdev);
819 : struct radeon_uvd_clock_voltage_dependency_table *table =
820 0 : &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
821 0 : struct atom_clock_dividers dividers;
822 : int ret;
823 : u32 i;
824 :
825 0 : if (table == NULL || table->count == 0)
826 0 : return 0;
827 :
828 0 : pi->uvd_level_count = 0;
829 0 : for (i = 0; i < table->count; i++) {
830 0 : if (pi->high_voltage_t &&
831 0 : (pi->high_voltage_t < table->entries[i].v))
832 : break;
833 :
834 0 : pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
835 0 : pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
836 0 : pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
837 :
838 0 : pi->uvd_level[i].VClkBypassCntl =
839 0 : (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
840 0 : pi->uvd_level[i].DClkBypassCntl =
841 0 : (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
842 :
843 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
844 0 : table->entries[i].vclk, false, ÷rs);
845 0 : if (ret)
846 0 : return ret;
847 0 : pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
848 :
849 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
850 0 : table->entries[i].dclk, false, ÷rs);
851 0 : if (ret)
852 0 : return ret;
853 0 : pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
854 :
855 0 : pi->uvd_level_count++;
856 : }
857 :
858 0 : ret = kv_copy_bytes_to_smc(rdev,
859 0 : pi->dpm_table_start +
860 : offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
861 : (u8 *)&pi->uvd_level_count,
862 0 : sizeof(u8), pi->sram_end);
863 0 : if (ret)
864 0 : return ret;
865 :
866 0 : pi->uvd_interval = 1;
867 :
868 0 : ret = kv_copy_bytes_to_smc(rdev,
869 0 : pi->dpm_table_start +
870 : offsetof(SMU7_Fusion_DpmTable, UVDInterval),
871 : &pi->uvd_interval,
872 0 : sizeof(u8), pi->sram_end);
873 0 : if (ret)
874 0 : return ret;
875 :
876 0 : ret = kv_copy_bytes_to_smc(rdev,
877 0 : pi->dpm_table_start +
878 : offsetof(SMU7_Fusion_DpmTable, UvdLevel),
879 0 : (u8 *)&pi->uvd_level,
880 : sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
881 0 : pi->sram_end);
882 :
883 0 : return ret;
884 :
885 0 : }
886 :
887 0 : static int kv_populate_vce_table(struct radeon_device *rdev)
888 : {
889 0 : struct kv_power_info *pi = kv_get_pi(rdev);
890 : int ret;
891 : u32 i;
892 : struct radeon_vce_clock_voltage_dependency_table *table =
893 0 : &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
894 0 : struct atom_clock_dividers dividers;
895 :
896 0 : if (table == NULL || table->count == 0)
897 0 : return 0;
898 :
899 0 : pi->vce_level_count = 0;
900 0 : for (i = 0; i < table->count; i++) {
901 0 : if (pi->high_voltage_t &&
902 0 : pi->high_voltage_t < table->entries[i].v)
903 : break;
904 :
905 0 : pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
906 0 : pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
907 :
908 0 : pi->vce_level[i].ClkBypassCntl =
909 0 : (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
910 :
911 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
912 0 : table->entries[i].evclk, false, ÷rs);
913 0 : if (ret)
914 0 : return ret;
915 0 : pi->vce_level[i].Divider = (u8)dividers.post_div;
916 :
917 0 : pi->vce_level_count++;
918 : }
919 :
920 0 : ret = kv_copy_bytes_to_smc(rdev,
921 0 : pi->dpm_table_start +
922 : offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
923 : (u8 *)&pi->vce_level_count,
924 : sizeof(u8),
925 0 : pi->sram_end);
926 0 : if (ret)
927 0 : return ret;
928 :
929 0 : pi->vce_interval = 1;
930 :
931 0 : ret = kv_copy_bytes_to_smc(rdev,
932 0 : pi->dpm_table_start +
933 : offsetof(SMU7_Fusion_DpmTable, VCEInterval),
934 : (u8 *)&pi->vce_interval,
935 : sizeof(u8),
936 0 : pi->sram_end);
937 0 : if (ret)
938 0 : return ret;
939 :
940 0 : ret = kv_copy_bytes_to_smc(rdev,
941 0 : pi->dpm_table_start +
942 : offsetof(SMU7_Fusion_DpmTable, VceLevel),
943 0 : (u8 *)&pi->vce_level,
944 : sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
945 0 : pi->sram_end);
946 :
947 0 : return ret;
948 0 : }
949 :
950 0 : static int kv_populate_samu_table(struct radeon_device *rdev)
951 : {
952 0 : struct kv_power_info *pi = kv_get_pi(rdev);
953 : struct radeon_clock_voltage_dependency_table *table =
954 0 : &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
955 0 : struct atom_clock_dividers dividers;
956 : int ret;
957 : u32 i;
958 :
959 0 : if (table == NULL || table->count == 0)
960 0 : return 0;
961 :
962 0 : pi->samu_level_count = 0;
963 0 : for (i = 0; i < table->count; i++) {
964 0 : if (pi->high_voltage_t &&
965 0 : pi->high_voltage_t < table->entries[i].v)
966 : break;
967 :
968 0 : pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
969 0 : pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
970 :
971 0 : pi->samu_level[i].ClkBypassCntl =
972 0 : (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
973 :
974 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
975 0 : table->entries[i].clk, false, ÷rs);
976 0 : if (ret)
977 0 : return ret;
978 0 : pi->samu_level[i].Divider = (u8)dividers.post_div;
979 :
980 0 : pi->samu_level_count++;
981 : }
982 :
983 0 : ret = kv_copy_bytes_to_smc(rdev,
984 0 : pi->dpm_table_start +
985 : offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
986 : (u8 *)&pi->samu_level_count,
987 : sizeof(u8),
988 0 : pi->sram_end);
989 0 : if (ret)
990 0 : return ret;
991 :
992 0 : pi->samu_interval = 1;
993 :
994 0 : ret = kv_copy_bytes_to_smc(rdev,
995 0 : pi->dpm_table_start +
996 : offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
997 : (u8 *)&pi->samu_interval,
998 : sizeof(u8),
999 0 : pi->sram_end);
1000 0 : if (ret)
1001 0 : return ret;
1002 :
1003 0 : ret = kv_copy_bytes_to_smc(rdev,
1004 0 : pi->dpm_table_start +
1005 : offsetof(SMU7_Fusion_DpmTable, SamuLevel),
1006 0 : (u8 *)&pi->samu_level,
1007 : sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
1008 0 : pi->sram_end);
1009 : if (ret)
1010 0 : return ret;
1011 :
1012 : return ret;
1013 0 : }
1014 :
1015 :
1016 0 : static int kv_populate_acp_table(struct radeon_device *rdev)
1017 : {
1018 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1019 : struct radeon_clock_voltage_dependency_table *table =
1020 0 : &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1021 0 : struct atom_clock_dividers dividers;
1022 : int ret;
1023 : u32 i;
1024 :
1025 0 : if (table == NULL || table->count == 0)
1026 0 : return 0;
1027 :
1028 0 : pi->acp_level_count = 0;
1029 0 : for (i = 0; i < table->count; i++) {
1030 0 : pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
1031 0 : pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
1032 :
1033 0 : ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1034 0 : table->entries[i].clk, false, ÷rs);
1035 0 : if (ret)
1036 0 : return ret;
1037 0 : pi->acp_level[i].Divider = (u8)dividers.post_div;
1038 :
1039 0 : pi->acp_level_count++;
1040 : }
1041 :
1042 0 : ret = kv_copy_bytes_to_smc(rdev,
1043 0 : pi->dpm_table_start +
1044 : offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
1045 : (u8 *)&pi->acp_level_count,
1046 : sizeof(u8),
1047 0 : pi->sram_end);
1048 0 : if (ret)
1049 0 : return ret;
1050 :
1051 0 : pi->acp_interval = 1;
1052 :
1053 0 : ret = kv_copy_bytes_to_smc(rdev,
1054 0 : pi->dpm_table_start +
1055 : offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1056 : (u8 *)&pi->acp_interval,
1057 : sizeof(u8),
1058 0 : pi->sram_end);
1059 0 : if (ret)
1060 0 : return ret;
1061 :
1062 0 : ret = kv_copy_bytes_to_smc(rdev,
1063 0 : pi->dpm_table_start +
1064 : offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1065 0 : (u8 *)&pi->acp_level,
1066 : sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1067 0 : pi->sram_end);
1068 : if (ret)
1069 0 : return ret;
1070 :
1071 : return ret;
1072 0 : }
1073 :
1074 0 : static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
1075 : {
1076 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1077 : u32 i;
1078 : struct radeon_clock_voltage_dependency_table *table =
1079 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1080 :
1081 0 : if (table && table->count) {
1082 0 : for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1083 0 : if (pi->caps_enable_dfs_bypass) {
1084 0 : if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1085 0 : pi->graphics_level[i].ClkBypassCntl = 3;
1086 0 : else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1087 0 : pi->graphics_level[i].ClkBypassCntl = 2;
1088 0 : else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1089 0 : pi->graphics_level[i].ClkBypassCntl = 7;
1090 0 : else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1091 0 : pi->graphics_level[i].ClkBypassCntl = 6;
1092 0 : else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1093 0 : pi->graphics_level[i].ClkBypassCntl = 8;
1094 : else
1095 0 : pi->graphics_level[i].ClkBypassCntl = 0;
1096 : } else {
1097 0 : pi->graphics_level[i].ClkBypassCntl = 0;
1098 : }
1099 : }
1100 : } else {
1101 : struct sumo_sclk_voltage_mapping_table *table =
1102 0 : &pi->sys_info.sclk_voltage_mapping_table;
1103 0 : for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1104 0 : if (pi->caps_enable_dfs_bypass) {
1105 0 : if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1106 0 : pi->graphics_level[i].ClkBypassCntl = 3;
1107 0 : else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1108 0 : pi->graphics_level[i].ClkBypassCntl = 2;
1109 0 : else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1110 0 : pi->graphics_level[i].ClkBypassCntl = 7;
1111 0 : else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1112 0 : pi->graphics_level[i].ClkBypassCntl = 6;
1113 0 : else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1114 0 : pi->graphics_level[i].ClkBypassCntl = 8;
1115 : else
1116 0 : pi->graphics_level[i].ClkBypassCntl = 0;
1117 : } else {
1118 0 : pi->graphics_level[i].ClkBypassCntl = 0;
1119 : }
1120 : }
1121 : }
1122 0 : }
1123 :
1124 0 : static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1125 : {
1126 0 : return kv_notify_message_to_smu(rdev, enable ?
1127 : PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1128 : }
1129 :
/* Invalidate the cached ACP boot level so the next update reprograms it. */
static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	/* 0xff never matches a real level index, forcing a resend */
	pi->acp_boot_level = 0xff;
}
1136 :
/* Copy @rps into the driver-private "current" power-state slots. */
static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	/* re-point ps_priv at our own copy, not the caller's storage */
	pi->current_rps.ps_priv = &pi->current_ps;
}
1147 :
/* Copy @rps into the driver-private "requested" power-state slots. */
static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	/* re-point ps_priv at our own copy, not the caller's storage */
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
1158 :
1159 0 : void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1160 : {
1161 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1162 : int ret;
1163 :
1164 0 : if (pi->bapm_enable) {
1165 0 : ret = kv_smc_bapm_enable(rdev, enable);
1166 0 : if (ret)
1167 0 : DRM_ERROR("kv_smc_bapm_enable failed\n");
1168 : }
1169 0 : }
1170 :
1171 0 : static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
1172 : {
1173 : u32 thermal_int;
1174 :
1175 0 : thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
1176 0 : if (enable)
1177 0 : thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
1178 : else
1179 0 : thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
1180 0 : WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
1181 :
1182 0 : }
1183 :
/*
 * kv_dpm_enable - bring up dynamic power management.
 *
 * Programs the SMC firmware tables (graphics, UVD, VCE, SAMU, ACP),
 * then enables the individual DPM features in a fixed order.  The
 * sequence is order-dependent; any failure aborts and returns the
 * error.  On success the boot power state becomes the current state.
 */
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	/* locate the firmware tables in SMC RAM first */
	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	/* push the computed DPM tables to the SMC */
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	/* BAPM starts disabled; kv_dpm_enable_bapm() toggles it later */
	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}
1283 :
/*
 * kv_dpm_late_enable - post-init DPM setup.
 *
 * Arms the thermal interrupt (when an internal sensor and an installed
 * IRQ handler are available) and power-gates the multimedia blocks
 * until they are actually needed.
 */
int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}
1306 :
/*
 * kv_dpm_disable - tear down dynamic power management.
 *
 * Roughly the reverse of kv_dpm_enable(): BAPM off, blocks powered
 * back up, then the individual DPM features disabled in order.
 * Finishes by restoring the boot state as the current state.
 */
void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	/* NB DPM is only toggled on Mullins; see kv_dpm_set_power_state() */
	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
1330 :
1331 : #if 0
1332 : static int kv_write_smc_soft_register(struct radeon_device *rdev,
1333 : u16 reg_offset, u32 value)
1334 : {
1335 : struct kv_power_info *pi = kv_get_pi(rdev);
1336 :
1337 : return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
1338 : (u8 *)&value, sizeof(u16), pi->sram_end);
1339 : }
1340 :
1341 : static int kv_read_smc_soft_register(struct radeon_device *rdev,
1342 : u16 reg_offset, u32 *value)
1343 : {
1344 : struct kv_power_info *pi = kv_get_pi(rdev);
1345 :
1346 : return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
1347 : value, pi->sram_end);
1348 : }
1349 : #endif
1350 :
/* Reset the low-sclk interrupt threshold (0 = disabled). */
static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}
1357 :
1358 0 : static int kv_init_fps_limits(struct radeon_device *rdev)
1359 : {
1360 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1361 : int ret = 0;
1362 :
1363 0 : if (pi->caps_fps) {
1364 : u16 tmp;
1365 :
1366 : tmp = 45;
1367 0 : pi->fps_high_t = cpu_to_be16(tmp);
1368 0 : ret = kv_copy_bytes_to_smc(rdev,
1369 0 : pi->dpm_table_start +
1370 : offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1371 0 : (u8 *)&pi->fps_high_t,
1372 0 : sizeof(u16), pi->sram_end);
1373 :
1374 : tmp = 30;
1375 0 : pi->fps_low_t = cpu_to_be16(tmp);
1376 :
1377 0 : ret = kv_copy_bytes_to_smc(rdev,
1378 0 : pi->dpm_table_start +
1379 : offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1380 : (u8 *)&pi->fps_low_t,
1381 0 : sizeof(u16), pi->sram_end);
1382 :
1383 0 : }
1384 0 : return ret;
1385 : }
1386 :
/* Mark all multimedia blocks as ungated at init time. */
static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;

}
1397 :
1398 0 : static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1399 : {
1400 0 : return kv_notify_message_to_smu(rdev, enable ?
1401 : PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1402 : }
1403 :
1404 0 : static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
1405 : {
1406 0 : return kv_notify_message_to_smu(rdev, enable ?
1407 : PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1408 : }
1409 :
1410 0 : static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1411 : {
1412 0 : return kv_notify_message_to_smu(rdev, enable ?
1413 : PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1414 : }
1415 :
1416 0 : static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1417 : {
1418 0 : return kv_notify_message_to_smu(rdev, enable ?
1419 : PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1420 : }
1421 :
/*
 * kv_update_uvd_dpm - program the UVD DPM boot level and enabled mask.
 * @gate: true when UVD is being gated (skip level programming).
 *
 * When ungating: picks the highest dependency-table entry as the boot
 * level (0 for an empty table), writes it into the SMC table, and
 * restricts the enabled-level mask to just that level when UVD DPM is
 * unsupported or a stable p-state is required (otherwise all 5 levels).
 * Always finishes by toggling UVD DPM via the SMU.
 */
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			/* all 5 UVD levels */
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}
1457 :
1458 0 : static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
1459 : {
1460 : u8 i;
1461 : struct radeon_vce_clock_voltage_dependency_table *table =
1462 0 : &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1463 :
1464 0 : for (i = 0; i < table->count; i++) {
1465 0 : if (table->entries[i].evclk >= evclk)
1466 : break;
1467 : }
1468 :
1469 0 : return i;
1470 : }
1471 :
/*
 * kv_update_vce_dpm - react to VCE clock transitions between states.
 *
 * Only acts on the two edges: encoder starting (evclk 0 -> nonzero)
 * ungates VCE, programs the boot level, and enables VCE DPM; encoder
 * stopping (nonzero -> 0) disables VCE DPM and regates the block.
 */
static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		/* stable p-state pins VCE to the single boot level */
		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}
1514 :
/*
 * kv_update_samu_dpm - program the SAMU boot level, then toggle SAMU DPM.
 * @gate: true when SAMU is being gated (skip level programming).
 *
 * Stable p-state uses the highest table entry and pins the enabled
 * mask to that single level; otherwise level 0 is the boot level.
 */
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}
1545 :
/*
 * kv_get_acp_boot_level - pick the initial ACP DPM level index.
 *
 * NOTE(review): the `clk >= 0` test is always true for an unsigned
 * clock, so the loop currently breaks at i = 0 — the XXX marks the
 * comparison as a placeholder threshold.  The clamp below only takes
 * effect for an empty table, where `count - 1` wraps; presumably
 * callers only reach this with a populated table — verify.
 */
static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}
1562 :
1563 0 : static void kv_update_acp_boot_level(struct radeon_device *rdev)
1564 : {
1565 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1566 : u8 acp_boot_level;
1567 :
1568 0 : if (!pi->caps_stable_p_state) {
1569 0 : acp_boot_level = kv_get_acp_boot_level(rdev);
1570 0 : if (acp_boot_level != pi->acp_boot_level) {
1571 0 : pi->acp_boot_level = acp_boot_level;
1572 0 : kv_send_msg_to_smc_with_parameter(rdev,
1573 : PPSMC_MSG_ACPDPM_SetEnabledMask,
1574 0 : (1 << pi->acp_boot_level));
1575 0 : }
1576 : }
1577 0 : }
1578 :
/*
 * kv_update_acp_dpm - program the ACP boot level, then toggle ACP DPM.
 * @gate: true when ACP is being gated (skip level programming).
 *
 * Stable p-state uses the highest table entry and pins the enabled
 * mask to it; otherwise the boot level comes from
 * kv_get_acp_boot_level().
 */
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
1609 :
/*
 * kv_dpm_powergate_uvd - gate or ungate the UVD block.
 *
 * Order matters: gating stops the UVD engine before powering it off;
 * ungating powers on first, then resumes and restarts the engine.
 * Idempotent — repeated calls with the same @gate are no-ops.
 */
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* stop the engine before cutting power */
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* power on before touching the engine */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}
1637 :
/*
 * kv_dpm_powergate_vce - gate or ungate the VCE block.
 *
 * Idempotent; only sends power messages when VCE powergating is
 * supported.  Ungating powers on, then resumes and restarts VCE.
 */
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}
1660 :
/*
 * kv_dpm_powergate_samu - gate or ungate the SAMU block.
 *
 * Idempotent.  DPM is disabled before powering off and re-enabled
 * after powering on.
 */
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}
1680 :
/*
 * kv_dpm_powergate_acp - gate or ungate the ACP (audio co-processor).
 *
 * Idempotent; a no-op on Kabini/Mullins, which have no ACP block.
 */
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}
1703 :
/*
 * kv_set_valid_clock_range - compute pi->lowest_valid / pi->highest_valid.
 *
 * Finds the DPM level indices bracketing the new power state's sclk
 * range, using the vddc-on-sclk dependency table when present or the
 * sumo sclk/voltage mapping table otherwise.  If the window inverts
 * (lowest > highest), the pair collapses onto whichever bound is the
 * smaller clock distance from the requested range.
 */
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		/* lowest level whose clock covers the state's minimum sclk */
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		/* highest level not exceeding the state's maximum sclk */
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		/* same search against the fallback mapping table */
		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels -1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
1765 :
/*
 * kv_update_dfs_bypass_settings - push the boot level's DFS bypass
 * control into the SMC graphics-level table.
 *
 * Only acts when DFS bypass is supported; writes 0 when the new state
 * does not need bypass.  Returns the SMC copy status (0 if skipped).
 */
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		/* address the ClkBypassCntl field of the boot-level entry */
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}
1788 :
1789 0 : static int kv_enable_nb_dpm(struct radeon_device *rdev,
1790 : bool enable)
1791 : {
1792 0 : struct kv_power_info *pi = kv_get_pi(rdev);
1793 : int ret = 0;
1794 :
1795 0 : if (enable) {
1796 0 : if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1797 0 : ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1798 0 : if (ret == 0)
1799 0 : pi->nb_dpm_enabled = true;
1800 : }
1801 : } else {
1802 0 : if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
1803 0 : ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
1804 0 : if (ret == 0)
1805 0 : pi->nb_dpm_enabled = false;
1806 : }
1807 : }
1808 :
1809 0 : return ret;
1810 : }
1811 :
1812 0 : int kv_dpm_force_performance_level(struct radeon_device *rdev,
1813 : enum radeon_dpm_forced_level level)
1814 : {
1815 : int ret;
1816 :
1817 0 : if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1818 0 : ret = kv_force_dpm_highest(rdev);
1819 0 : if (ret)
1820 0 : return ret;
1821 0 : } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1822 0 : ret = kv_force_dpm_lowest(rdev);
1823 0 : if (ret)
1824 0 : return ret;
1825 0 : } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
1826 0 : ret = kv_unforce_levels(rdev);
1827 0 : if (ret)
1828 0 : return ret;
1829 : }
1830 :
1831 0 : rdev->pm.dpm.forced_level = level;
1832 :
1833 0 : return 0;
1834 0 : }
1835 :
/*
 * kv_dpm_pre_set_power_state - stage the requested power state.
 *
 * Copies the core-requested state into the driver-private "requested"
 * slot and lets the KV-specific adjustment rules tweak it against the
 * current state before kv_dpm_set_power_state() applies it.
 */
int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
1850 :
/*
 * kv_dpm_set_power_state - apply the previously staged power state.
 *
 * Reprograms the DPM levels for the requested state.  Kabini/Mullins
 * use a force-lowest / unforce sequence around the table upload, while
 * Kaveri freezes sclk DPM instead.  Both paths then handle the VCE
 * transition, update the sclk threshold, and (re)enable NB DPM.
 */
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		/* track AC vs. battery power for BAPM */
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* pin to the lowest level while swapping tables */
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* freeze sclk DPM while swapping tables */
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}
1924 :
/* Promote the applied "requested" state to be the "current" state. */
void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}
1932 :
/* One-time ASIC setup: hand clock control to the SMU and reset state. */
void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}
1939 :
1940 : #if 0
1941 : void kv_dpm_reset_asic(struct radeon_device *rdev)
1942 : {
1943 : struct kv_power_info *pi = kv_get_pi(rdev);
1944 :
1945 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
1946 : kv_force_lowest_valid(rdev);
1947 : kv_init_graphics_levels(rdev);
1948 : kv_program_bootup_state(rdev);
1949 : kv_upload_dpm_settings(rdev);
1950 : kv_force_lowest_valid(rdev);
1951 : kv_unforce_levels(rdev);
1952 : } else {
1953 : kv_init_graphics_levels(rdev);
1954 : kv_program_bootup_state(rdev);
1955 : kv_freeze_sclk_dpm(rdev, true);
1956 : kv_upload_dpm_settings(rdev);
1957 : kv_freeze_sclk_dpm(rdev, false);
1958 : kv_set_enabled_level(rdev, pi->graphics_boot_level);
1959 : }
1960 : }
1961 : #endif
1962 :
1963 : //XXX use sumo_dpm_display_configuration_changed
1964 :
/*
 * kv_construct_max_power_limits_table - fill @table with the maximums.
 *
 * sclk/vddc come from the last (highest) sclk-voltage mapping entry
 * when one exists; mclk is always the first NB p-state memory clock.
 */
static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
1981 :
     : /*
     :  * kv_patch_voltage_values - convert table voltage indices to voltages
     :  * The UVD/VCE/SAMU/ACP clock-voltage dependency tables parsed from the
     :  * BIOS store 8-bit voltage indices; rewrite each entry's .v in place
     :  * with the real voltage via kv_convert_8bit_index_to_voltage().
     :  */
1982 0 : static void kv_patch_voltage_values(struct radeon_device *rdev)
1983 : {
1984 : int i;
1985 : struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
1986 0 : &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1987 : struct radeon_vce_clock_voltage_dependency_table *vce_table =
1988 0 : &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1989 : struct radeon_clock_voltage_dependency_table *samu_table =
1990 0 : &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1991 : struct radeon_clock_voltage_dependency_table *acp_table =
1992 0 : &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1993 :
1994 0 : if (uvd_table->count) {
1995 0 : for (i = 0; i < uvd_table->count; i++)
1996 0 : uvd_table->entries[i].v =
1997 0 : kv_convert_8bit_index_to_voltage(rdev,
1998 0 : uvd_table->entries[i].v);
1999 : }
2000 :
2001 0 : if (vce_table->count) {
2002 0 : for (i = 0; i < vce_table->count; i++)
2003 0 : vce_table->entries[i].v =
2004 0 : kv_convert_8bit_index_to_voltage(rdev,
2005 0 : vce_table->entries[i].v);
2006 : }
2007 :
2008 0 : if (samu_table->count) {
2009 0 : for (i = 0; i < samu_table->count; i++)
2010 0 : samu_table->entries[i].v =
2011 0 : kv_convert_8bit_index_to_voltage(rdev,
2012 0 : samu_table->entries[i].v);
2013 : }
2014 :
2015 0 : if (acp_table->count) {
2016 0 : for (i = 0; i < acp_table->count; i++)
2017 0 : acp_table->entries[i].v =
2018 0 : kv_convert_8bit_index_to_voltage(rdev,
2019 0 : acp_table->entries[i].v);
2020 : }
2021 :
2022 0 : }
2023 :
     : /*
     :  * kv_construct_boot_state - record the bootup power level
     :  * Captures the bootup sclk and NB voltage index into pi->boot_pl with
     :  * conservative defaults (no dividers, gnb slow allowed, no forced NBPS).
     :  */
2024 0 : static void kv_construct_boot_state(struct radeon_device *rdev)
2025 : {
2026 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2027 :
2028 0 : pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
2029 0 : pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
2030 0 : pi->boot_pl.ds_divider_index = 0;
2031 0 : pi->boot_pl.ss_divider_index = 0;
2032 0 : pi->boot_pl.allow_gnb_slow = 1;
2033 0 : pi->boot_pl.force_nbp_state = 0;
2034 0 : pi->boot_pl.display_wm = 0;
2035 0 : pi->boot_pl.vce_wm = 0;
2036 0 : }
2037 :
     : /*
     :  * kv_force_dpm_highest - pin dpm to the highest enabled graphics level
     :  * Scans the SMU enable mask from the top down for the highest set bit.
     :  * KB/ML use the ForceState SMC message; other parts restrict the
     :  * enabled-level mask to that single level instead.
     :  * Returns 0 on success or a negative error from the SMC call.
     :  */
2038 0 : static int kv_force_dpm_highest(struct radeon_device *rdev)
2039 : {
2040 : int ret;
2041 0 : u32 enable_mask, i;
2042 :
2043 0 : ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
2044 0 : if (ret)
2045 0 : return ret;
2046 :
2047 0 : for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
2048 0 : if (enable_mask & (1 << i))
2049 : break;
2050 : }
2051 :
2052 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2053 0 : return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
2054 : else
2055 0 : return kv_set_enabled_level(rdev, i);
2056 0 : }
2057 :
     : /*
     :  * kv_force_dpm_lowest - pin dpm to the lowest enabled graphics level
     :  * Mirror of kv_force_dpm_highest(): scans the enable mask from the
     :  * bottom up for the first set bit and forces that level.
     :  * Returns 0 on success or a negative error from the SMC call.
     :  */
2058 0 : static int kv_force_dpm_lowest(struct radeon_device *rdev)
2059 : {
2060 : int ret;
2061 0 : u32 enable_mask, i;
2062 :
2063 0 : ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
2064 0 : if (ret)
2065 0 : return ret;
2066 :
2067 0 : for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2068 0 : if (enable_mask & (1 << i))
2069 : break;
2070 : }
2071 :
2072 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2073 0 : return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
2074 : else
2075 0 : return kv_set_enabled_level(rdev, i);
2076 0 : }
2077 :
     : /*
     :  * kv_get_sleep_divider_id_from_clock - pick a deep-sleep divider id
     :  * @sclk: engine clock for the level
     :  * @min_sclk_in_sr: minimum allowed clock while in self-refresh
     :  *
     :  * Returns the largest divider id (searched from
     :  * KV_MAX_DEEPSLEEP_DIVIDER_ID down) whose divided sclk stays at or
     :  * above the minimum, or 0 when sclk is already too low or deep-sleep
     :  * dividers are not supported (caps_sclk_ds unset).
     :  */
2078 0 : static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2079 : u32 sclk, u32 min_sclk_in_sr)
2080 : {
2081 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2082 : u32 i;
2083 : u32 temp;
2084 0 : u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
2085 : min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
2086 :
2087 0 : if (sclk < min)
2088 0 : return 0;
2089 :
2090 0 : if (!pi->caps_sclk_ds)
2091 0 : return 0;
2092 :
2093 0 : for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
2094 0 : temp = sclk / sumo_get_sleep_divider_from_id(i);
2095 0 : if (temp >= min)
2096 : break;
2097 : }
2098 :
2099 0 : return (u8)i;
2100 0 : }
2101 :
     : /*
     :  * kv_get_high_voltage_limit - find the highest level within the
     :  * high-voltage threshold
     :  * @limit: out parameter; receives the table index
     :  *
     :  * Walks the vddc-on-sclk dependency table (or, if absent, the sumo
     :  * sclk/voltage mapping table) from the top down and returns the first
     :  * index whose voltage is at or below pi->high_voltage_t.  Falls back
     :  * to index 0 when nothing matches.  Always returns 0.
     :  */
2102 0 : static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
2103 : {
2104 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2105 : struct radeon_clock_voltage_dependency_table *table =
2106 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2107 : int i;
2108 :
2109 0 : if (table && table->count) {
2110 0 : for (i = table->count - 1; i >= 0; i--) {
2111 0 : if (pi->high_voltage_t &&
2112 0 : (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
2113 0 : pi->high_voltage_t)) {
2114 0 : *limit = i;
2115 0 : return 0;
2116 : }
2117 : }
2118 : } else {
2119 : struct sumo_sclk_voltage_mapping_table *table =
2120 0 : &pi->sys_info.sclk_voltage_mapping_table;
2121 :
2122 0 : for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
2123 0 : if (pi->high_voltage_t &&
2124 0 : (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
2125 0 : pi->high_voltage_t)) {
2126 0 : *limit = i;
2127 0 : return 0;
2128 : }
2129 : }
2130 0 : }
2131 :
2132 0 : *limit = 0;
2133 0 : return 0;
2134 0 : }
2135 :
2136 0 : static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
2137 : struct radeon_ps *new_rps,
2138 : struct radeon_ps *old_rps)
2139 : {
2140 0 : struct kv_ps *ps = kv_get_ps(new_rps);
2141 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2142 : u32 min_sclk = 10000; /* ??? */
2143 : u32 sclk, mclk = 0;
2144 0 : int i, limit;
2145 : bool force_high;
2146 : struct radeon_clock_voltage_dependency_table *table =
2147 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2148 : u32 stable_p_state_sclk = 0;
2149 : struct radeon_clock_and_voltage_limits *max_limits =
2150 0 : &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2151 :
2152 0 : if (new_rps->vce_active) {
2153 0 : new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
2154 0 : new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
2155 0 : } else {
2156 0 : new_rps->evclk = 0;
2157 0 : new_rps->ecclk = 0;
2158 : }
2159 :
2160 0 : mclk = max_limits->mclk;
2161 : sclk = min_sclk;
2162 :
2163 0 : if (pi->caps_stable_p_state) {
2164 0 : stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2165 :
2166 0 : for (i = table->count - 1; i >= 0; i++) {
2167 0 : if (stable_p_state_sclk >= table->entries[i].clk) {
2168 : stable_p_state_sclk = table->entries[i].clk;
2169 0 : break;
2170 : }
2171 : }
2172 :
2173 0 : if (i > 0)
2174 0 : stable_p_state_sclk = table->entries[0].clk;
2175 :
2176 : sclk = stable_p_state_sclk;
2177 0 : }
2178 :
2179 0 : if (new_rps->vce_active) {
2180 0 : if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
2181 0 : sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
2182 : }
2183 :
2184 0 : ps->need_dfs_bypass = true;
2185 :
2186 0 : for (i = 0; i < ps->num_levels; i++) {
2187 0 : if (ps->levels[i].sclk < sclk)
2188 0 : ps->levels[i].sclk = sclk;
2189 : }
2190 :
2191 0 : if (table && table->count) {
2192 0 : for (i = 0; i < ps->num_levels; i++) {
2193 0 : if (pi->high_voltage_t &&
2194 0 : (pi->high_voltage_t <
2195 0 : kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2196 0 : kv_get_high_voltage_limit(rdev, &limit);
2197 0 : ps->levels[i].sclk = table->entries[limit].clk;
2198 0 : }
2199 : }
2200 : } else {
2201 : struct sumo_sclk_voltage_mapping_table *table =
2202 0 : &pi->sys_info.sclk_voltage_mapping_table;
2203 :
2204 0 : for (i = 0; i < ps->num_levels; i++) {
2205 0 : if (pi->high_voltage_t &&
2206 0 : (pi->high_voltage_t <
2207 0 : kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2208 0 : kv_get_high_voltage_limit(rdev, &limit);
2209 0 : ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2210 0 : }
2211 : }
2212 : }
2213 :
2214 0 : if (pi->caps_stable_p_state) {
2215 0 : for (i = 0; i < ps->num_levels; i++) {
2216 0 : ps->levels[i].sclk = stable_p_state_sclk;
2217 : }
2218 : }
2219 :
2220 0 : pi->video_start = new_rps->dclk || new_rps->vclk ||
2221 0 : new_rps->evclk || new_rps->ecclk;
2222 :
2223 0 : if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2224 : ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2225 0 : pi->battery_state = true;
2226 : else
2227 0 : pi->battery_state = false;
2228 :
2229 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2230 0 : ps->dpm0_pg_nb_ps_lo = 0x1;
2231 0 : ps->dpm0_pg_nb_ps_hi = 0x0;
2232 0 : ps->dpmx_nb_ps_lo = 0x1;
2233 0 : ps->dpmx_nb_ps_hi = 0x0;
2234 0 : } else {
2235 0 : ps->dpm0_pg_nb_ps_lo = 0x3;
2236 0 : ps->dpm0_pg_nb_ps_hi = 0x0;
2237 0 : ps->dpmx_nb_ps_lo = 0x3;
2238 0 : ps->dpmx_nb_ps_hi = 0x0;
2239 :
2240 0 : if (pi->sys_info.nb_dpm_enable) {
2241 0 : force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2242 0 : pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2243 0 : pi->disable_nb_ps3_in_battery;
2244 0 : ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2245 0 : ps->dpm0_pg_nb_ps_hi = 0x2;
2246 0 : ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2247 0 : ps->dpmx_nb_ps_hi = 0x2;
2248 0 : }
2249 : }
2250 0 : }
2251 :
2252 0 : static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2253 : u32 index, bool enable)
2254 : {
2255 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2256 :
2257 0 : pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2258 0 : }
2259 :
     : /*
     :  * kv_calculate_ds_divider - set the deep-sleep divider id for every
     :  * valid graphics level based on its (big-endian) SMU sclk value.
     :  * Returns -EINVAL if the valid-level window is empty, else 0.
     :  */
2260 0 : static int kv_calculate_ds_divider(struct radeon_device *rdev)
2261 : {
2262 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2263 : u32 sclk_in_sr = 10000; /* ??? */
2264 : u32 i;
2265 :
2266 0 : if (pi->lowest_valid > pi->highest_valid)
2267 0 : return -EINVAL;
2268 :
2269 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2270 0 : pi->graphics_level[i].DeepSleepDivId =
2271 0 : kv_get_sleep_divider_id_from_clock(rdev,
     : /* SclkFrequency is stored big-endian for the SMU */
2272 0 : be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2273 : sclk_in_sr);
2274 : }
2275 0 : return 0;
2276 0 : }
2277 :
     : /*
     :  * kv_calculate_nbps_level_settings - set per-level NB p-state fields
     :  * Initializes GnbSlow/ForceNbPs1/UpH for every valid graphics level,
     :  * then applies family-specific overrides: KB/ML force GnbSlow off when
     :  * high mclk, 3+ crtcs, or video playback demand it; other parts tweak
     :  * the lowest level when NB dpm is enabled on battery.
     :  * Returns -EINVAL if the valid-level window is empty, else 0.
     :  */
2278 0 : static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2279 : {
2280 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2281 : u32 i;
2282 : bool force_high;
2283 : struct radeon_clock_and_voltage_limits *max_limits =
2284 0 : &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2285 0 : u32 mclk = max_limits->mclk;
2286 :
2287 0 : if (pi->lowest_valid > pi->highest_valid)
2288 0 : return -EINVAL;
2289 :
2290 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2291 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2292 0 : pi->graphics_level[i].GnbSlow = 1;
2293 0 : pi->graphics_level[i].ForceNbPs1 = 0;
2294 0 : pi->graphics_level[i].UpH = 0;
2295 : }
2296 :
2297 0 : if (!pi->sys_info.nb_dpm_enable)
2298 0 : return 0;
2299 :
2300 0 : force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2301 0 : (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2302 :
2303 0 : if (force_high) {
2304 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2305 0 : pi->graphics_level[i].GnbSlow = 0;
2306 : } else {
2307 0 : if (pi->battery_state)
2308 0 : pi->graphics_level[0].ForceNbPs1 = 1;
2309 :
2310 0 : pi->graphics_level[1].GnbSlow = 0;
2311 0 : pi->graphics_level[2].GnbSlow = 0;
2312 0 : pi->graphics_level[3].GnbSlow = 0;
2313 0 : pi->graphics_level[4].GnbSlow = 0;
2314 : }
2315 : } else {
2316 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2317 0 : pi->graphics_level[i].GnbSlow = 1;
2318 0 : pi->graphics_level[i].ForceNbPs1 = 0;
2319 0 : pi->graphics_level[i].UpH = 0;
2320 : }
2321 :
2322 0 : if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2323 0 : pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2324 0 : pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2325 0 : if (pi->lowest_valid != pi->highest_valid)
2326 0 : pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2327 : }
2328 : }
2329 0 : return 0;
2330 0 : }
2331 :
2332 0 : static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2333 : {
2334 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2335 : u32 i;
2336 :
2337 0 : if (pi->lowest_valid > pi->highest_valid)
2338 0 : return -EINVAL;
2339 :
2340 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2341 0 : pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2342 :
2343 0 : return 0;
2344 0 : }
2345 :
     : /*
     :  * kv_init_graphics_levels - build the SMU graphics level table
     :  * Programs divider, voltage id, activity target and throttle flag for
     :  * each dpm level from the vddc-on-sclk dependency table, or from the
     :  * sumo sclk/voltage mapping table when the former is empty.  Levels
     :  * above the high-voltage threshold are skipped.  All hardware levels
     :  * are then disabled; kv_enable_new_levels() re-enables the valid ones.
     :  */
2346 0 : static void kv_init_graphics_levels(struct radeon_device *rdev)
2347 : {
2348 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2349 : u32 i;
2350 : struct radeon_clock_voltage_dependency_table *table =
2351 0 : &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2352 :
2353 0 : if (table && table->count) {
2354 : u32 vid_2bit;
2355 :
2356 0 : pi->graphics_dpm_level_count = 0;
2357 0 : for (i = 0; i < table->count; i++) {
2358 0 : if (pi->high_voltage_t &&
2359 0 : (pi->high_voltage_t <
2360 0 : kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2361 : break;
2362 :
2363 0 : kv_set_divider_value(rdev, i, table->entries[i].clk);
     : /* table stores 7-bit vids; hardware wants the 2-bit form */
2364 0 : vid_2bit = kv_convert_vid7_to_vid2(rdev,
2365 0 : &pi->sys_info.vid_mapping_table,
2366 0 : table->entries[i].v);
2367 0 : kv_set_vid(rdev, i, vid_2bit);
2368 0 : kv_set_at(rdev, i, pi->at[i]);
2369 0 : kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2370 0 : pi->graphics_dpm_level_count++;
2371 : }
2372 0 : } else {
2373 : struct sumo_sclk_voltage_mapping_table *table =
2374 0 : &pi->sys_info.sclk_voltage_mapping_table;
2375 :
2376 0 : pi->graphics_dpm_level_count = 0;
2377 0 : for (i = 0; i < table->num_max_dpm_entries; i++) {
2378 0 : if (pi->high_voltage_t &&
2379 0 : pi->high_voltage_t <
2380 0 : kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2381 : break;
2382 :
2383 0 : kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2384 0 : kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2385 0 : kv_set_at(rdev, i, pi->at[i]);
2386 0 : kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2387 0 : pi->graphics_dpm_level_count++;
2388 : }
2389 : }
2390 :
     : /* start with every hardware level disabled */
2391 0 : for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2392 0 : kv_dpm_power_level_enable(rdev, i, false);
2393 0 : }
2394 :
2395 0 : static void kv_enable_new_levels(struct radeon_device *rdev)
2396 : {
2397 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2398 : u32 i;
2399 :
2400 0 : for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2401 0 : if (i >= pi->lowest_valid && i <= pi->highest_valid)
2402 0 : kv_dpm_power_level_enable(rdev, i, true);
2403 : }
2404 0 : }
2405 :
2406 0 : static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2407 : {
2408 0 : u32 new_mask = (1 << level);
2409 :
2410 0 : return kv_send_msg_to_smc_with_parameter(rdev,
2411 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
2412 : new_mask);
2413 : }
2414 :
2415 0 : static int kv_set_enabled_levels(struct radeon_device *rdev)
2416 : {
2417 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2418 : u32 i, new_mask = 0;
2419 :
2420 0 : for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2421 0 : new_mask |= (1 << i);
2422 :
2423 0 : return kv_send_msg_to_smc_with_parameter(rdev,
2424 : PPSMC_MSG_SCLKDPM_SetEnabledMask,
2425 : new_mask);
2426 : }
2427 :
     : /*
     :  * kv_program_nbps_index_settings - write the NB p-state hints
     :  * Writes the dpm0/dpmx lo/hi fields computed by
     :  * kv_apply_state_adjust_rules() into NB_DPM_CONFIG_1.  No-op on
     :  * KB/ML (no NB dpm config there) or when NB dpm is disabled.
     :  */
2428 0 : static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2429 : struct radeon_ps *new_rps)
2430 : {
2431 0 : struct kv_ps *new_ps = kv_get_ps(new_rps);
2432 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2433 : u32 nbdpmconfig1;
2434 :
2435 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2436 0 : return;
2437 :
2438 0 : if (pi->sys_info.nb_dpm_enable) {
     : /* read-modify-write: only touch the four p-state fields */
2439 0 : nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2440 : nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2441 : DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2442 0 : nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2443 0 : Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2444 0 : DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2445 0 : DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2446 0 : WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2447 0 : }
2448 0 : }
2449 :
     : /*
     :  * kv_set_thermal_temperature_range - program thermal interrupt limits
     :  * @min_temp, @max_temp: requested range in millidegrees C
     :  *
     :  * Clamps the range to 0..255 C, programs the high/low interrupt
     :  * thresholds in CG_THERMAL_INT_CTRL (the +49 bias matches the
     :  * register encoding used by the upstream driver — verify against hw
     :  * docs), and records the effective range.  Returns -EINVAL for an
     :  * inverted range, else 0.
     :  */
2450 0 : static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2451 : int min_temp, int max_temp)
2452 : {
2453 : int low_temp = 0 * 1000;
2454 : int high_temp = 255 * 1000;
2455 : u32 tmp;
2456 :
2457 0 : if (low_temp < min_temp)
2458 0 : low_temp = min_temp;
2459 0 : if (high_temp > max_temp)
2460 0 : high_temp = max_temp;
2461 0 : if (high_temp < low_temp) {
2462 0 : DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2463 0 : return -EINVAL;
2464 : }
2465 :
2466 0 : tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2467 0 : tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2468 0 : tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2469 0 : DIG_THERM_INTL(49 + (low_temp / 1000)));
2470 0 : WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2471 :
2472 0 : rdev->pm.dpm.thermal.min_temp = low_temp;
2473 0 : rdev->pm.dpm.thermal.max_temp = high_temp;
2474 :
2475 0 : return 0;
2476 0 : }
2477 :
     : /* Overlay of every supported ATOM IntegratedSystemInfo table revision;
     :  * kv_parse_sys_info_table() only accepts crev 8 (info_8). */
2478 : union igp_info {
2479 : struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2480 : struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2481 : struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2482 : struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2483 : struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2484 : struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2485 : };
2486 :
     : /*
     :  * kv_parse_sys_info_table - read the ATOM IntegratedSystemInfo table
     :  * Extracts bootup clocks/voltage, thermal limits, NB dpm capability,
     :  * NB p-state clocks and the sclk/voltage mapping tables from the
     :  * crev-8 table, then derives the max AC power limits.  Returns
     :  * -EINVAL for any other table revision, else 0.
     :  */
2487 0 : static int kv_parse_sys_info_table(struct radeon_device *rdev)
2488 : {
2489 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2490 0 : struct radeon_mode_info *mode_info = &rdev->mode_info;
2491 : int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2492 : union igp_info *igp_info;
2493 0 : u8 frev, crev;
2494 0 : u16 data_offset;
2495 : int i;
2496 :
2497 0 : if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2498 : &frev, &crev, &data_offset)) {
2499 0 : igp_info = (union igp_info *)(mode_info->atom_context->bios +
2500 0 : data_offset);
2501 :
     : /* only table revision 8 is supported here */
2502 0 : if (crev != 8) {
2503 0 : DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2504 0 : return -EINVAL;
2505 : }
2506 0 : pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2507 0 : pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2508 0 : pi->sys_info.bootup_nb_voltage_index =
2509 0 : le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
     : /* 0 in the BIOS fields means "use the driver defaults" */
2510 0 : if (igp_info->info_8.ucHtcTmpLmt == 0)
2511 0 : pi->sys_info.htc_tmp_lmt = 203;
2512 : else
2513 0 : pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2514 0 : if (igp_info->info_8.ucHtcHystLmt == 0)
2515 0 : pi->sys_info.htc_hyst_lmt = 5;
2516 : else
2517 0 : pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2518 0 : if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2519 0 : DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2520 0 : }
2521 :
2522 0 : if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2523 0 : pi->sys_info.nb_dpm_enable = true;
2524 : else
2525 0 : pi->sys_info.nb_dpm_enable = false;
2526 :
2527 0 : for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2528 0 : pi->sys_info.nbp_memory_clock[i] =
2529 0 : le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2530 0 : pi->sys_info.nbp_n_clock[i] =
2531 0 : le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2532 : }
2533 0 : if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2534 : SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2535 0 : pi->caps_enable_dfs_bypass = true;
2536 :
2537 0 : sumo_construct_sclk_voltage_mapping_table(rdev,
2538 0 : &pi->sys_info.sclk_voltage_mapping_table,
2539 0 : igp_info->info_8.sAvail_SCLK);
2540 :
2541 0 : sumo_construct_vid_mapping_table(rdev,
2542 0 : &pi->sys_info.vid_mapping_table,
2543 : igp_info->info_8.sAvail_SCLK);
2544 :
2545 0 : kv_construct_max_power_limits_table(rdev,
2546 0 : &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2547 0 : }
2548 0 : return 0;
2549 0 : }
2550 :
     : /* Overlays of the ATOM PowerPlay structures used by
     :  * kv_parse_power_table(); only the pplib/v2/sumo members are read. */
2551 : union power_info {
2552 : struct _ATOM_POWERPLAY_INFO info;
2553 : struct _ATOM_POWERPLAY_INFO_V2 info_2;
2554 : struct _ATOM_POWERPLAY_INFO_V3 info_3;
2555 : struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2556 : struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2557 : struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2558 : };
2559 :
2560 : union pplib_clock_info {
2561 : struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2562 : struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2563 : struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2564 : struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2565 : };
2566 :
2567 : union pplib_power_state {
2568 : struct _ATOM_PPLIB_STATE v1;
2569 : struct _ATOM_PPLIB_STATE_V2 v2;
2570 : };
2571 :
     : /*
     :  * kv_patch_boot_state - replace a state's levels with the boot level
     :  * Used for the BIOS boot state: a single level copied from
     :  * pi->boot_pl (see kv_construct_boot_state()).
     :  */
2572 0 : static void kv_patch_boot_state(struct radeon_device *rdev,
2573 : struct kv_ps *ps)
2574 : {
2575 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2576 :
2577 0 : ps->num_levels = 1;
2578 0 : ps->levels[0] = pi->boot_pl;
2579 0 : }
2580 :
     : /*
     :  * kv_parse_pplib_non_clock_info - fill the generic radeon_ps fields
     :  * Copies caps/class flags and (for table rev > VER1) the UVD vclk/dclk
     :  * from the ATOM non-clock info, and registers the state as the boot
     :  * and/or UVD state when its classification says so.
     :  */
2581 0 : static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2582 : struct radeon_ps *rps,
2583 : struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2584 : u8 table_rev)
2585 : {
2586 0 : struct kv_ps *ps = kv_get_ps(rps);
2587 :
2588 0 : rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2589 0 : rps->class = le16_to_cpu(non_clock_info->usClassification);
2590 0 : rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2591 :
2592 0 : if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2593 0 : rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2594 0 : rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2595 0 : } else {
2596 0 : rps->vclk = 0;
2597 0 : rps->dclk = 0;
2598 : }
2599 :
2600 0 : if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2601 0 : rdev->pm.dpm.boot_ps = rps;
     : /* boot state carries exactly one level: the bootup one */
2602 0 : kv_patch_boot_state(rdev, ps);
2603 0 : }
2604 0 : if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2605 0 : rdev->pm.dpm.uvd_ps = rps;
2606 0 : }
2607 :
     : /*
     :  * kv_parse_pplib_clock_info - decode one sumo clock info entry
     :  * Assembles the 24-bit engine clock from the low/high BIOS fields,
     :  * records the voltage index, bumps the state's level count, and
     :  * applies default deep-sleep dividers when caps_sclk_ds is set.
     :  */
2608 0 : static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2609 : struct radeon_ps *rps, int index,
2610 : union pplib_clock_info *clock_info)
2611 : {
2612 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2613 0 : struct kv_ps *ps = kv_get_ps(rps);
2614 0 : struct kv_pl *pl = &ps->levels[index];
2615 : u32 sclk;
2616 :
     : /* sclk is split into a 16-bit low word and an 8-bit high byte */
2617 0 : sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2618 0 : sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2619 0 : pl->sclk = sclk;
2620 0 : pl->vddc_index = clock_info->sumo.vddcIndex;
2621 :
2622 0 : ps->num_levels = index + 1;
2623 :
2624 0 : if (pi->caps_sclk_ds) {
2625 0 : pl->ds_divider_index = 5;
2626 0 : pl->ss_divider_index = 5;
2627 0 : }
2628 0 : }
2629 :
2630 0 : static int kv_parse_power_table(struct radeon_device *rdev)
2631 : {
2632 0 : struct radeon_mode_info *mode_info = &rdev->mode_info;
2633 : struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2634 : union pplib_power_state *power_state;
2635 : int i, j, k, non_clock_array_index, clock_array_index;
2636 : union pplib_clock_info *clock_info;
2637 : struct _StateArray *state_array;
2638 : struct _ClockInfoArray *clock_info_array;
2639 : struct _NonClockInfoArray *non_clock_info_array;
2640 : union power_info *power_info;
2641 : int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2642 0 : u16 data_offset;
2643 0 : u8 frev, crev;
2644 : u8 *power_state_offset;
2645 : struct kv_ps *ps;
2646 :
2647 0 : if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2648 : &frev, &crev, &data_offset))
2649 0 : return -EINVAL;
2650 0 : power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2651 :
2652 0 : state_array = (struct _StateArray *)
2653 0 : (mode_info->atom_context->bios + data_offset +
2654 0 : le16_to_cpu(power_info->pplib.usStateArrayOffset));
2655 0 : clock_info_array = (struct _ClockInfoArray *)
2656 0 : (mode_info->atom_context->bios + data_offset +
2657 0 : le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2658 0 : non_clock_info_array = (struct _NonClockInfoArray *)
2659 0 : (mode_info->atom_context->bios + data_offset +
2660 0 : le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2661 :
2662 0 : rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2663 0 : state_array->ucNumEntries, GFP_KERNEL);
2664 0 : if (!rdev->pm.dpm.ps)
2665 0 : return -ENOMEM;
2666 0 : power_state_offset = (u8 *)state_array->states;
2667 0 : for (i = 0; i < state_array->ucNumEntries; i++) {
2668 : u8 *idx;
2669 0 : power_state = (union pplib_power_state *)power_state_offset;
2670 0 : non_clock_array_index = power_state->v2.nonClockInfoIndex;
2671 : non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2672 0 : &non_clock_info_array->nonClockInfo[non_clock_array_index];
2673 0 : if (!rdev->pm.power_state[i].clock_info)
2674 0 : return -EINVAL;
2675 0 : ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2676 0 : if (ps == NULL) {
2677 0 : kfree(rdev->pm.dpm.ps);
2678 0 : return -ENOMEM;
2679 : }
2680 0 : rdev->pm.dpm.ps[i].ps_priv = ps;
2681 : k = 0;
2682 0 : idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2683 0 : for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2684 0 : clock_array_index = idx[j];
2685 0 : if (clock_array_index >= clock_info_array->ucNumEntries)
2686 : continue;
2687 0 : if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2688 : break;
2689 0 : clock_info = (union pplib_clock_info *)
2690 0 : ((u8 *)&clock_info_array->clockInfo[0] +
2691 0 : (clock_array_index * clock_info_array->ucEntrySize));
2692 0 : kv_parse_pplib_clock_info(rdev,
2693 0 : &rdev->pm.dpm.ps[i], k,
2694 : clock_info);
2695 0 : k++;
2696 0 : }
2697 0 : kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2698 : non_clock_info,
2699 0 : non_clock_info_array->ucEntrySize);
2700 0 : power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2701 0 : }
2702 0 : rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2703 :
2704 : /* fill in the vce power states */
2705 0 : for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
2706 : u32 sclk;
2707 0 : clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
2708 0 : clock_info = (union pplib_clock_info *)
2709 0 : &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2710 0 : sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2711 0 : sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2712 0 : rdev->pm.dpm.vce_states[i].sclk = sclk;
2713 0 : rdev->pm.dpm.vce_states[i].mclk = 0;
2714 : }
2715 :
2716 0 : return 0;
2717 0 : }
2718 :
     : /*
     :  * kv_dpm_init - allocate and initialize the KV dpm private state
     :  * Allocates the kv_power_info, parses the platform caps, extended
     :  * power table, IGP system info and power table, then sets the driver
     :  * capability defaults.  Returns 0 on success or a negative error.
     :  * NOTE(review): on error the pi allocation stays in
     :  * rdev->pm.dpm.priv — presumably kv_dpm_fini() frees it; confirm the
     :  * caller's failure path actually invokes fini.
     :  */
2719 0 : int kv_dpm_init(struct radeon_device *rdev)
2720 : {
2721 : struct kv_power_info *pi;
2722 : int ret, i;
2723 :
2724 0 : pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2725 0 : if (pi == NULL)
2726 0 : return -ENOMEM;
2727 0 : rdev->pm.dpm.priv = pi;
2728 :
2729 0 : ret = r600_get_platform_caps(rdev);
2730 0 : if (ret)
2731 0 : return ret;
2732 :
2733 0 : ret = r600_parse_extended_power_table(rdev);
2734 0 : if (ret)
2735 0 : return ret;
2736 :
2737 0 : for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2738 0 : pi->at[i] = TRINITY_AT_DFLT;
2739 :
2740 0 : pi->sram_end = SMC_RAM_END;
2741 :
2742 : /* Enabling nb dpm on an asrock system prevents dpm from working */
2743 0 : if (rdev->pdev->subsystem_vendor == 0x1849)
2744 0 : pi->enable_nb_dpm = false;
2745 : else
2746 0 : pi->enable_nb_dpm = true;
2747 :
2748 0 : pi->caps_power_containment = true;
2749 0 : pi->caps_cac = true;
2750 0 : pi->enable_didt = false;
2751 0 : if (pi->enable_didt) {
2752 0 : pi->caps_sq_ramping = true;
2753 0 : pi->caps_db_ramping = true;
2754 0 : pi->caps_td_ramping = true;
2755 0 : pi->caps_tcp_ramping = true;
2756 0 : }
2757 :
2758 0 : pi->caps_sclk_ds = true;
2759 0 : pi->enable_auto_thermal_throttling = true;
2760 0 : pi->disable_nb_ps3_in_battery = false;
     : /* radeon_bapm module parameter: -1 = auto, 0 = off, else on */
2761 0 : if (radeon_bapm == -1) {
2762 : /* only enable bapm on KB, ML by default */
2763 0 : if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2764 0 : pi->bapm_enable = true;
2765 : else
2766 0 : pi->bapm_enable = false;
2767 0 : } else if (radeon_bapm == 0) {
2768 0 : pi->bapm_enable = false;
2769 0 : } else {
2770 0 : pi->bapm_enable = true;
2771 : }
2772 0 : pi->voltage_drop_t = 0;
2773 0 : pi->caps_sclk_throttle_low_notification = false;
2774 0 : pi->caps_fps = false; /* true? */
2775 0 : pi->caps_uvd_pg = true;
2776 0 : pi->caps_uvd_dpm = true;
2777 0 : pi->caps_vce_pg = false; /* XXX true */
2778 0 : pi->caps_samu_pg = false;
2779 0 : pi->caps_acp_pg = false;
2780 0 : pi->caps_stable_p_state = false;
2781 :
2782 0 : ret = kv_parse_sys_info_table(rdev);
2783 0 : if (ret)
2784 0 : return ret;
2785 :
2786 0 : kv_patch_voltage_values(rdev);
2787 0 : kv_construct_boot_state(rdev);
2788 :
2789 0 : ret = kv_parse_power_table(rdev);
2790 0 : if (ret)
2791 0 : return ret;
2792 :
2793 0 : pi->enable_dpm = true;
2794 :
2795 0 : return 0;
2796 0 : }
2797 :
     : /*
     :  * kv_dpm_debugfs_print_current_performance_level - debugfs dump
     :  * Reads the current sclk dpm index from the SMU profile register and
     :  * prints the corresponding sclk, voltage and UVD/VCE power-gate state.
     :  */
2798 0 : void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2799 : struct seq_file *m)
2800 : {
2801 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2802 : u32 current_index =
2803 0 : (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2804 : CURR_SCLK_INDEX_SHIFT;
2805 : u32 sclk, tmp;
2806 : u16 vddc;
2807 :
2808 0 : if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2809 0 : seq_printf(m, "invalid dpm profile %d\n", current_index);
2810 0 : } else {
     : /* SMU table values are big-endian */
2811 0 : sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2812 0 : tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2813 : SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2814 0 : vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2815 0 : seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2816 0 : seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
2817 0 : seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2818 0 : current_index, sclk, vddc);
2819 : }
2820 0 : }
2821 :
2822 0 : u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
2823 : {
2824 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2825 : u32 current_index =
2826 0 : (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2827 : CURR_SCLK_INDEX_SHIFT;
2828 : u32 sclk;
2829 :
2830 0 : if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2831 0 : return 0;
2832 : } else {
2833 0 : sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2834 0 : return sclk;
2835 : }
2836 0 : }
2837 :
     : /*
     :  * kv_dpm_get_current_mclk - report the memory clock
     :  * KV is an APU; the UMA clock fixed at bootup is reported.
     :  */
2838 0 : u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
2839 : {
2840 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2841 :
2842 0 : return pi->sys_info.bootup_uma_clk;
2843 : }
2844 :
     : /*
     :  * kv_dpm_print_power_state - log a power state's class, caps, UVD
     :  * clocks and every level's sclk/voltage, plus its current/requested
     :  * status.
     :  */
2845 0 : void kv_dpm_print_power_state(struct radeon_device *rdev,
2846 : struct radeon_ps *rps)
2847 : {
2848 : int i;
2849 0 : struct kv_ps *ps = kv_get_ps(rps);
2850 :
2851 0 : r600_dpm_print_class_info(rps->class, rps->class2);
2852 0 : r600_dpm_print_cap_info(rps->caps);
2853 0 : printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2854 0 : for (i = 0; i < ps->num_levels; i++) {
2855 0 : struct kv_pl *pl = &ps->levels[i];
2856 0 : printk("\t\tpower level %d sclk: %u vddc: %u\n",
2857 : i, pl->sclk,
2858 : kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2859 : }
2860 0 : r600_dpm_print_ps_status(rdev, rps);
2861 0 : }
2862 :
     : /*
     :  * kv_dpm_fini - tear down everything kv_dpm_init() allocated
     :  * Frees each state's priv struct, the state array, the kv_power_info,
     :  * and the extended power tables.
     :  */
2863 0 : void kv_dpm_fini(struct radeon_device *rdev)
2864 : {
2865 : int i;
2866 :
2867 0 : for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2868 0 : kfree(rdev->pm.dpm.ps[i].ps_priv);
2869 : }
2870 0 : kfree(rdev->pm.dpm.ps);
2871 0 : kfree(rdev->pm.dpm.priv);
2872 0 : r600_free_extended_power_table(rdev);
2873 0 : }
2874 :
     : /*
     :  * kv_dpm_display_configuration_changed - asic dpm callback
     :  * Intentionally empty for KV; see the XXX note above about reusing
     :  * sumo_dpm_display_configuration_changed.
     :  */
2875 0 : void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2876 : {
2877 :
2878 0 : }
2879 :
2880 0 : u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2881 : {
2882 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2883 0 : struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2884 :
2885 0 : if (low)
2886 0 : return requested_state->levels[0].sclk;
2887 : else
2888 0 : return requested_state->levels[requested_state->num_levels - 1].sclk;
2889 0 : }
2890 :
     : /*
     :  * kv_dpm_get_mclk - report the memory clock (@low is irrelevant:
     :  * KV is an APU with a fixed bootup UMA clock).
     :  */
2891 0 : u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2892 : {
2893 0 : struct kv_power_info *pi = kv_get_pi(rdev);
2894 :
2895 0 : return pi->sys_info.bootup_uma_clk;
2896 : }
2897 :
|