/*	$NetBSD: nouveau_nvkm_subdev_clk_nv50.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_nv50.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");

#include "nv50.h"
#include "pll.h"
#include "seq.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>

static u32
read_div(struct nv50_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	switch (device->chipset) {
	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
	case 0x84:
	case 0x86:
	case 0x98:
	case 0xa0:
		return nvkm_rd32(device, 0x004700);
	case 0x92:
	case 0x94:
	case 0x96:
		return nvkm_rd32(device, 0x004800);
	default:
		return 0x00000000;
	}
}

static u32
read_pll_src(struct nv50_clk *clk, u32 base)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
	u32 rsel = nvkm_rd32(device, 0x00e18c);
	int P, N, M, id;

	switch (device->chipset) {
	case 0x50:
	case 0xa0:
		switch (base) {
		case 0x4020:
		case 0x4028: id = !!(rsel & 0x00000004); break;
		case 0x4008: id = !!(rsel & 0x00000008); break;
		case 0x4030: id = 0; break;
		default:
			nvkm_error(subdev, "ref: bad pll %06x\n", base);
			return 0;
		}

		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
		ref *= (coef & 0x01000000) ? 2 : 4;
		P = (coef & 0x00070000) >> 16;
		N = ((coef & 0x0000ff00) >> 8) + 1;
		M = ((coef & 0x000000ff) >> 0) + 1;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
		coef = nvkm_rd32(device, 0x00e81c);
		P = (coef & 0x00070000) >> 16;
		N = (coef & 0x0000ff00) >> 8;
		M = (coef & 0x000000ff) >> 0;
		break;
	case 0x94:
	case 0x96:
	case 0x98:
		rsel = nvkm_rd32(device, 0x00c050);
		switch (base) {
		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
		case 0x4030: rsel = 3; break;
		default:
			nvkm_error(subdev, "ref: bad pll %06x\n", base);
			return 0;
		}

		switch (rsel) {
		case 0: id = 1; break;
		case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
		case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 3: id = 0; break;
		}

		coef = nvkm_rd32(device, 0x00e81c + (id * 0x28));
		P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
		P += (coef & 0x00070000) >> 16;
		N = (coef & 0x0000ff00) >> 8;
		M = (coef & 0x000000ff) >> 0;
		break;
	default:
		BUG();
	}

	if (M)
		return (ref * N / M) >> P;

	return 0;
}

static u32
read_pll_ref(struct nv50_clk *clk, u32 base)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 src, mast = nvkm_rd32(device, 0x00c040);

	switch (base) {
	case 0x004028:
		src = !!(mast & 0x00200000);
		break;
	case 0x004020:
		src = !!(mast & 0x00400000);
		break;
	case 0x004008:
		src = !!(mast & 0x00010000);
		break;
	case 0x004030:
		src = !!(mast & 0x02000000);
		break;
	case 0x00e810:
		return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
	default:
		nvkm_error(subdev, "bad pll %06x\n", base);
		return 0;
	}

	if (src)
		return nvkm_clk_read(&clk->base, nv_clk_src_href);

	return read_pll_src(clk, base);
}

static u32
read_pll(struct nv50_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 mast = nvkm_rd32(device, 0x00c040);
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = read_pll_ref(clk, base);
	u32 freq = 0;
	int N1, N2, M1, M2;

	if (base == 0x004028 && (mast & 0x00100000)) {
		/* wtf, appears to only disable post-divider on gt200 */
		if (device->chipset != 0xa0)
			return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
	}

	N2 = (coef & 0xff000000) >> 24;
	M2 = (coef & 0x00ff0000) >> 16;
	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		freq = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				freq = freq * N2 / M2;
			else
				freq = 0;
		}
	}

	return freq;
}

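/* Readback summary (editorial note, not from the original source): all
 * frequencies in this file are in kHz. read_pll() decodes a two-stage PLL:
 * freq = ref * N1 / M1, multiplied by N2 / M2 when the second stage is
 * enabled; callers apply the post-divider P separately as a right shift.
 * Illustrative values only: a 27000 kHz crystal with N1 = 74, M1 = 2 gives
 * 999000 kHz, which a caller with P = 1 halves to 499500 kHz.
 */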
int
nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct nv50_clk *clk = nv50_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c040);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclk:
		return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
	case nv_clk_src_hclkm3:
		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
	case nv_clk_src_hclkm3d2:
		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
	case nv_clk_src_host:
		switch (mast & 0x30000000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x10000000: break;
		case 0x20000000: /* !0x50 */
		case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
		}
		break;
	case nv_clk_src_core:
		if (!(mast & 0x00100000))
			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
		case 0x00000002: return read_pll(clk, 0x004020) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_shader:
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000080)
				return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
			switch (mast & 0x0000c000) {
			case 0x00000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
			case 0x00008000:
			case 0x0000c000:
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			}
		} else {
			return read_pll(clk, 0x004008) >> P;
		}
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;
		switch (device->chipset) {
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0xa0:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				if (device->chipset == 0xa0) /* wtf?? */
					return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				if (mast & 0x01000000)
					return read_pll(clk, 0x004028) >> P;
				return read_pll(clk, 0x004030) >> P;
			case 0x00000c00:
				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			}
			break;
		case 0x98:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
			case 0x00000c00:
				return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
			}
			break;
		}
		break;
	case nv_clk_src_dom6:
		switch (device->chipset) {
		case 0x50:
		case 0xa0:
			return read_pll(clk, 0x00e810) >> 2;
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0x98:
			P = (read_div(clk) & 0x00000007) >> 0;
			switch (mast & 0x0c000000) {
			case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
			case 0x04000000: break;
			case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
			case 0x0c000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
			}
			break;
		default:
			break;
		}
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return -EINVAL;
}

static u32
calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
	if (ret)
		return 0;

	pll.vco2.max_freq = 0;
	pll.refclk = read_pll_ref(clk, reg);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
}

static inline u32
calc_div(u32 src, u32 target, int *div)
{
	u32 clk0 = src, clk1 = src;
	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

static inline u32
clk_same(u32 a, u32 b)
{
	return ((a / 1000) == (b / 1000));
}

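/* Reclock overview (editorial note summarising the hwsq script built below):
 * block the fifo and disable the framebuffer, park vdec/dom6 on safe sources
 * while their dividers are updated, disconnect nvclk/sclk from their PLLs
 * (nvclk to dom6, sclk to hclk), reprogram nvpll, then either tie sclk to
 * nvclk or reprogram spll, and finally re-enable the framebuffer and
 * un-block the fifo.
 */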
int
nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct nv50_clk *clk = nv50_clk(base);
	struct nv50_clk_hwsq *hwsq = &clk->hwsq;
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	const int dom6 = cstate->domain[nv_clk_src_dom6];
	u32 mastm = 0, mastv = 0;
	u32 divsm = 0, divsv = 0;
	int N, M, P1, P2;
	int freq, out;

	/* prepare a hwsq script from which we'll perform the reclock */
	out = clk_init(hwsq, subdev);
	if (out)
		return out;

	clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
	clk_nsec(hwsq, 8000);
	clk_setf(hwsq, 0x10, 0x00); /* disable fb */
	clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */

	/* vdec: avoid modifying xpll until we know exactly how the other
	 * clock domains work, i suspect at least some of them can also be
	 * tied to xpll...
	 */
	if (vdec) {
		/* see how close we can get using nvclk as a source */
		freq = calc_div(core, vdec, &P1);

		/* see how close we can get using xpll/hclk as a source */
		if (device->chipset != 0x98)
			out = read_pll(clk, 0x004030);
		else
			out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
		out = calc_div(out, vdec, &P2);

		/* select whichever gets us closest */
		if (abs(vdec - freq) <= abs(vdec - out)) {
			if (device->chipset != 0x98)
				mastv |= 0x00000c00;
			divsv |= P1 << 8;
		} else {
			mastv |= 0x00000800;
			divsv |= P2 << 8;
		}

		mastm |= 0x00000c00;
		divsm |= 0x00000700;
	}

	/* dom6: nfi what this is, but we're limited to various combinations
	 * of the host clock frequency
	 */
	if (dom6) {
		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
			mastv |= 0x00000000;
		} else
		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
			mastv |= 0x08000000;
		} else {
			freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
			calc_div(freq, dom6, &P1);

			mastv |= 0x0c000000;
			divsv |= P1;
		}

		mastm |= 0x0c000000;
		divsm |= 0x00000007;
	}

	/* vdec/dom6: switch to "safe" clocks temporarily, update dividers
	 * and then switch to target clocks
	 */
	clk_mask(hwsq, mast, mastm, 0x00000000);
	clk_mask(hwsq, divs, divsm, divsv);
	clk_mask(hwsq, mast, mastm, mastv);

	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
	 * sclk to hclk) before reprogramming
	 */
	if (device->chipset < 0x92)
		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
	else
		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);

	/* core: for the moment at least, always use nvpll */
	freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
	if (freq == 0)
		return -ERANGE;

	clk_mask(hwsq, nvpll[0], 0xc03f0100,
		 0x80000000 | (P1 << 19) | (P1 << 16));
	clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);

	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
	 * very careful that the shader clock is at least twice the core, or
	 * some chipsets will be very unhappy.  i expect most or all of these
	 * cases will be handled by tying to nvclk, but it's possible there's
	 * corners
	 */
	if (P1-- && shader == (core << 1)) {
		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
	} else {
		freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (freq == 0)
			return -ERANGE;

		clk_mask(hwsq, spll[0], 0xc03f0100,
			 0x80000000 | (P1 << 19) | (P1 << 16));
		clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
		clk_mask(hwsq, mast, 0x00100033, 0x00000033);
	}

	/* restore normal operation */
	clk_setf(hwsq, 0x10, 0x01); /* enable fb */
	clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
	clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
	return 0;
}

int
nv50_clk_prog(struct nvkm_clk *base)
{
	struct nv50_clk *clk = nv50_clk(base);
	return clk_exec(&clk->hwsq, true);
}

void
nv50_clk_tidy(struct nvkm_clk *base)
{
	struct nv50_clk *clk = nv50_clk(base);
	clk_exec(&clk->hwsq, false);
}

int
nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	struct nv50_clk *clk;
	int ret;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
	*pclk = &clk->base;
	if (ret)
		return ret;

	clk->hwsq.r_fifo = hwsq_reg(0x002504);
	clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
	clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
	clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
	clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
	switch (device->chipset) {
	case 0x92:
	case 0x94:
	case 0x96:
		clk->hwsq.r_divs = hwsq_reg(0x004800);
		break;
	default:
		clk->hwsq.r_divs = hwsq_reg(0x004700);
		break;
	}
	clk->hwsq.r_mast = hwsq_reg(0x00c040);
	return 0;
}

static const struct nvkm_clk_func
nv50_clk = {
	.read = nv50_clk_read,
	.calc = nv50_clk_calc,
	.prog = nv50_clk_prog,
	.tidy = nv50_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
		{ nv_clk_src_max }
	}
};

int
nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
}