// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO	0x53
#define MSR_PM_LOGICAL_ID	0x54
#define MSR_CPU_BUS_NUMBER	0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
	MSR_PM_LOGICAL_ID,
};

struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};

static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0C},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};

struct isst_cmd {
	struct hlist_node hnode;
	u64 data;
	u32 cmd;
	int cpu;
	int mbox_cmd_type;
	u32 param;
};

static bool isst_hpm_support;

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u32 data)
{
	struct isst_cmd *sst_cmd;

	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
	if (!sst_cmd)
		return -ENOMEM;

	sst_cmd->cpu = cpu;
	sst_cmd->cmd = cmd;
	sst_cmd->mbox_cmd_type = mbox_cmd_type;
	sst_cmd->param = param;
	sst_cmd->data = data;

	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

	return 0;
}

static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}
/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command in a hash table if there is no such command already
 * stored. If the command is already stored, update its parameter and data
 * with the latest values.
 *
 * Return: Result of the store to the hash table, 0 for success, others for
 * failure.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret;

	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
	mutex_lock(&isst_hash_lock);
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
			sst_cmd->param = param;
			sst_cmd->data = data;
			mutex_unlock(&isst_hash_lock);
			return 0;
		}
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
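
/*
 * Illustrative example (not part of the driver): the 16-bit command and
 * sub-command are packed into a single 32-bit hash key, so command 0x7F
 * with sub-command 0x02 is stored under key 0x007F0002. A mailbox
 * interface driver would typically record a successful set request like:
 *
 *	ret = isst_store_cmd(0x7F, 0x02, cpu, 1, mbox_cmd->parameter,
 *			     mbox_cmd->req_data);
 *
 * so that isst_resume_common() can replay it after a suspend/resume cycle.
 * The command numbers above are only an example taken from the whitelist
 * tables earlier in this file.
 */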

static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}

/**
 * isst_resume_common() - Process Resume request
 *
 * On resume replay all mailbox commands and MSRs.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
EXPORT_SYMBOL_GPL(isst_resume_common);

static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}
/**
 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the platform.
 * This function checks the command against a whitelist of allowed commands.
 *
 * Return: Return true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
	int i;

	if (cmd->logical_cpu >= nr_cpu_ids)
		return true;

	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
		if (cmd->command == isst_valid_cmds[i].cmd &&
		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
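
/*
 * Worked example (illustrative): with the ranges in isst_valid_cmds[],
 * command 0x7F with sub-command 0x05 falls inside {0x7F, 0x00, 0x0C} and
 * is accepted (the function returns false), while command 0x7F with
 * sub-command 0x30 matches no range and is rejected (returns true), as is
 * any request whose logical_cpu is out of range.
 */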

/**
 * isst_if_mbox_cmd_set_req() - Check if a mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check if the given mailbox command is a set request and not a get request.
 *
 * Return: Return true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
		    cmd->parameter == isst_cmd_set_reqs[i].param) {
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
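
/*
 * Example (illustrative): per isst_cmd_set_reqs[] above, command 0xD0,
 * sub-command 0x00 with parameter 0x08 is classified as a set request;
 * the mailbox interface drivers use this to require CAP_SYS_ADMIN and to
 * store the command for replay on resume. The same command with a
 * different parameter is treated as a get request.
 */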

static int isst_if_api_version;

static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info;

	info.api_version = isst_if_api_version;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
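
/*
 * Sketch of the matching user-space call (illustrative, error handling
 * trimmed), assuming the misc device below is exposed as
 * /dev/isst_interface:
 *
 *	struct isst_if_platform_info info;
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info))
 *		printf("api:%d mbox:%d mmio:%d\n", info.api_version,
 *		       info.mbox_supported, info.mmio_supported);
 */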

#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
	int punit_cpu_id;
	int numa_node;
};

struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;

static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_physical_package_id(cpu);

	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info_once("Failed to get NUMA node for CPU:%d bus:%d dev:%d fn:%d\n",
				     cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no NUMA-matched pci_dev, then there are the following cases:
	 * 1. CONFIG_NUMA is not defined: In this case, if there is only a single
	 *    device match, then we don't need NUMA information. Simply return the
	 *    last match. Otherwise return NULL.
	 * 2. NUMA information is not exposed via the _SEG method. This case is
	 *    similar to case 1.
	 * 3. NUMA information doesn't match the CPU's NUMA node and there is more
	 *    than one match: return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}

/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find out the PCI device for a bus number,
 * device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *pci_dev;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
		return pci_dev;

	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
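
/*
 * Illustrative use by an interface driver (hypothetical BAR index): look
 * up the device at bus 1, device 30, function 1 that was cached for this
 * CPU's bus numbers, then read one of its resources:
 *
 *	struct pci_dev *pdev = isst_if_get_pci_dev(cpu, 1, 30, 1);
 *
 *	if (pdev)
 *		base = pci_resource_start(pdev, 0);
 *
 * Bus 1, device 30, function 1 matches the device cached in
 * isst_if_cpu_online() below; the resource index is only an example.
 */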

static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on an MSR-mailbox-only interface */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	if (isst_hpm_support) {
		ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
		if (!ret)
			goto set_punit_id;
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}

set_punit_id:
	isst_cpu_info[cpu].punit_cpu_id = data;

	isst_restore_msr_local(cpu);

	return 0;
}

static int isst_if_online_id;

static int isst_if_cpu_info_init(void)
{
	int ret;

	isst_cpu_info = kcalloc(num_possible_cpus(),
				sizeof(*isst_cpu_info),
				GFP_KERNEL);
	if (!isst_cpu_info)
		return -ENOMEM;

	isst_pkg_info = kcalloc(topology_max_packages(),
				sizeof(*isst_pkg_info),
				GFP_KERNEL);
	if (!isst_pkg_info) {
		kfree(isst_cpu_info);
		return -ENOMEM;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/isst-if:online",
				isst_if_cpu_online, NULL);
	if (ret < 0) {
		kfree(isst_pkg_info);
		kfree(isst_cpu_info);
		return ret;
	}

	isst_if_online_id = ret;

	return 0;
}

static void isst_if_cpu_info_exit(void)
{
	cpuhp_remove_state(isst_if_online_id);
	kfree(isst_pkg_info);
	kfree(isst_cpu_info);
}

static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *cpu_map;

	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
	if (cpu_map->logical_cpu >= nr_cpu_ids ||
	    cpu_map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	*write_only = 0;
	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

	return 0;
}

static bool match_punit_msr_white_list(int msr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (punit_msr_white_list[i] == msr)
			return true;
	}

	return false;
}

static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}

	return ret;
}

static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	return i ? i : ret;
}
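
/*
 * Layout of the user buffer consumed above (illustrative): a u32 command
 * count followed by cmd_count fixed-size command structures. For
 * ISST_IF_MSR_COMMAND this mirrors the uapi definition, roughly:
 *
 *	struct isst_if_msr_cmds {
 *		__u32 cmd_count;
 *		struct isst_if_msr_cmd msr_cmd[1];
 *	};
 *
 * cb->offset is offsetof(struct isst_if_msr_cmds, msr_cmd) and
 * cb->cmd_size is sizeof(struct isst_if_msr_cmd), so the loop walks the
 * command array one entry at a time; see include/uapi/linux/isst_if.h for
 * the authoritative structure definitions.
 */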

static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;
	int i;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
			struct isst_if_cmd_cb *cb = &punit_callbacks[i];
			int ret;

			if (cb->def_ioctl) {
				ret = cb->def_ioctl(file, cmd, arg);
				if (!ret)
					return ret;
			}
		}
		break;
	}

	return ret;
}

/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Lock to allow one shared misc device for all ISST interfaces */
static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;

static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}

static int isst_if_release(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "isst_interface",
	.fops		= &isst_if_char_driver_ops,
};

static const struct x86_cpu_id hpm_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X,	NULL),
	{}
};

static int isst_misc_reg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_device_ret)
		goto unlock_exit;

	if (!misc_usage_count) {
		const struct x86_cpu_id *id;

		id = x86_match_cpu(hpm_cpu_ids);
		if (id)
			isst_hpm_support = true;

		misc_device_ret = isst_if_cpu_info_init();
		if (misc_device_ret)
			goto unlock_exit;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret) {
			isst_if_cpu_info_exit();
			goto unlock_exit;
		}
	}
	misc_usage_count++;

unlock_exit:
	mutex_unlock(&punit_misc_dev_reg_lock);

	return misc_device_ret;
}

static void isst_misc_unreg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_usage_count)
		misc_usage_count--;
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_reg_lock);
}

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
 * @cb:	Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it will register a misc device, which is used for the user/kernel
 * interface. Other calls simply increment the reference count. Registration
 * will fail if the user has already opened the misc device for operation.
 * Also, if misc device creation failed, it will not be retried and all
 * callers will get the failure code.
 *
 * Return: Return the return value from misc device creation or -EINVAL
 * for an unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	int ret;

	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	if (!cb->api_version)
		cb->api_version = ISST_IF_API_VERSION;
	if (cb->api_version > isst_if_api_version)
		isst_if_api_version = cb->api_version;
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	ret = isst_misc_reg();
	if (ret) {
		/*
		 * No need for a mutex as misc device registration failed,
		 * so no one can open the device yet. Hence no contention.
		 */
		punit_callbacks[device_type].registered = 0;
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
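
/*
 * Sketch of how an interface driver registers itself (illustrative; the
 * real mailbox and MMIO interface drivers live in separate modules, and
 * my_mbox_cmd_handler here is a hypothetical callback):
 *
 *	static struct isst_if_cmd_cb cb = {
 *		.cmd_size = sizeof(struct isst_if_mbox_cmd),
 *		.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd),
 *		.cmd_callback = my_mbox_cmd_handler,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
 *
 * with a matching isst_if_cdev_unregister(ISST_IF_DEV_MBOX) on removal.
 */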

/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback being unregistered, then the misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	isst_misc_unreg();
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].def_ioctl = NULL;
	punit_callbacks[device_type].registered = 0;
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

MODULE_LICENSE("GPL v2");