// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

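/*
 * Global device bookkeeping: the accelerator device list, the VF id mapping
 * list, the device counter and the id allocation map below are all updated
 * under table_lock.
 */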
static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

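/*
 * Maps a device to its internal ids.  PF entries are stored with bdf == ~0;
 * VF entries are keyed by the bus/VF-index value from adf_get_vf_num().
 * @id is the allocated device id and @fake_id the user-visible id, which is
 * adjusted as sibling VFs detach and re-attach.
 */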
struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

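/*
 * Derive the VF index from the VF's PCI slot and function number;
 * equivalent to 8 * (slot - 1) + func.
 */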
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

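/* Combine the VF's bus number and VF index into the key used in vfs_table. */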
static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}

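/* Find the VF mapping entry for a given key; called with table_lock held. */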
static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

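/* Translate a user-visible (fake) id to the real device id, or -1 if unknown. */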
static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for VFs only or for both VFs and PFs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data:  Pointer to internal device data.
 *
 * Function updates the internal device index for VFs.
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

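/*
 * Claim the first free device id; returns ADF_MAX_DEVICES + 1 when all ids
 * are in use.  Called with table_lock held.
 */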
static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
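			/*
			 * The VF is re-attaching: restore the fake ids that
			 * adf_devmgr_rm_dev() shifted down when this VF was
			 * previously removed.
			 */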
			map->fake_id++;
			map->attached = true;
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}

			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
	mutex_init(&accel_dev->state_lock);
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);

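/* Return the head of the global accelerator device list. */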
struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
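		/*
		 * Keep the mapping entry but mark it detached and shift down
		 * the fake ids of this entry and those after it, so the
		 * user-visible numbering stays contiguous.
		 */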
		map->fake_id--;
		map->attached = false;
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
	mutex_destroy(&accel_dev->state_lock);
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

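/* Return the first registered accelerator device, or NULL if there is none. */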
struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given PCI device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);

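/*
 * Look up an accelerator device by its user-visible id.  The id is first
 * translated to the real device id via the VF mapping table; returns NULL
 * if no matching device is registered.
 */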
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

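/* Check that the given id refers to an existing device or to all devices. */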
int adf_devmgr_verify_id(u32 id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

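/* Count VF mapping entries whose device is currently detached. */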
static int adf_get_num_dettached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

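/* Report the number of registered devices, excluding detached VFs. */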
void adf_devmgr_get_num_dev(u32 *num)
{
	*num = num_devices - adf_get_num_dettached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and, when taking the first reference,
 * take a reference on the owning module as well.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount cannot be taken.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and, when the last reference is dropped,
 * release the module reference taken by adf_dev_get().
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);
