/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
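/*
 * Clamp a clock frequency to the limits of the VBIOS boost table entry for
 * the given pstate/domain, if one exists, optionally scaling it by the
 * entry's percentage first.
 */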
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
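/*
 * Check whether a cstate is usable: its VPSTATE-limited clock domains must
 * not exceed the base/boost limits for the current boost mode, and its
 * voltage requirement must fit within max_volt at the given temperature.
 */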
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				fallthrough;
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}

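/*
 * Starting from the given cstate, walk backwards through the pstate's
 * cstate list and return the highest cstate that is valid under the
 * current voltage and temperature limits.
 */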
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *cstate)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	int max_volt;

	if (!pstate || !cstate)
		return NULL;

	if (!volt)
		return cstate;

	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			return cstate;
	}

	return NULL;
}

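/*
 * Look up a cstate by id, or return the highest one when
 * NVKM_CLK_CSTATE_HIGHEST is requested.
 */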
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;
	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);
	else {
		list_for_each_entry(cstate, &pstate->list, head) {
			if (cstate->id == cstatei)
				return cstate;
		}
	}
	return NULL;
}

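/*
 * Program the requested cstate: raise fan speed and voltage beforehand if
 * necessary, reclock, then relax them again afterwards.
 */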
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
		if (!cstate)
			return -EINVAL;
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

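/*
 * Create a cstate from a VBIOS cstep table entry and add it to the pstate's
 * list, skipping entries whose voltage requirement cannot be supplied.
 */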
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8  ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
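/*
 * Program the pstate at the given list index: update the PCIe link,
 * reclock memory if supported, then program the highest valid cstate.
 */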
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}

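/*
 * Deferred work that selects the target pstate from the user (AC/DC),
 * adjustment and thermal inputs, and programs it if it changed.
 */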
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
}

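/* Kick the pstate worker, optionally waiting for it to complete. */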
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

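/* Log the frequency ranges covered by a pstate and its cstates. */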
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

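/*
 * Construct a pstate from a VBIOS performance table entry, along with any
 * cstates described by the cstep table, and add it to the state list.
 */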
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate = &pstate->base;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8  sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					  &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
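/*
 * Validate a user pstate request and translate it into a state list index,
 * returned with a +2 bias so that "disabled" (-1) and "auto" (-2) requests
 * still map to non-negative values distinct from error codes.
 */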
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

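/*
 * Parse an NvClkMode* option string ("auto", "disabled", or a pstate
 * number) into a ustate value.
 */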
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr) clk->ustate_ac = ret;
		else		   clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp == temp)
		return 0;
	clk->temp = temp;
	return nvkm_pstate_calc(clk, false);
}

int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

int
nvkm_clk_pwrsrc(struct nvkm_device *device)
{
	if (device->clk)
		return nvkm_pstate_calc(device->clk, false);
	return 0;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

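/*
 * Record the boot clock frequencies in the boot pstate (bstate); if the
 * implementation does not provide its own init, set default state
 * selections and trigger an initial reclock.
 */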
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);

	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}

int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk);
}