1306196Sjkim// SPDX-License-Identifier: GPL-2.0-only
2127131Snectar/* Copyright(c) 2024 Intel Corporation */
3127131Snectar
4142429Snectar#include <linux/anon_inodes.h>
5127131Snectar#include <linux/container_of.h>
6127131Snectar#include <linux/device.h>
7127131Snectar#include <linux/file.h>
8127131Snectar#include <linux/init.h>
9127131Snectar#include <linux/kernel.h>
10127131Snectar#include <linux/module.h>
11127131Snectar#include <linux/mutex.h>
12127131Snectar#include <linux/pci.h>
13127131Snectar#include <linux/sizes.h>
14127131Snectar#include <linux/types.h>
15127131Snectar#include <linux/uaccess.h>
16127131Snectar#include <linux/vfio_pci_core.h>
17127131Snectar#include <linux/qat/qat_mig_dev.h>
18127131Snectar
/*
 * The migration data of each Intel QAT VF device is encapsulated into a
 * 4096 byte block. The data consists of two parts.
 * The first is a pre-configured set of attributes of the VF being migrated,
 * which are only set when it is created. This can be migrated during the
 * pre-copy stage and used for a device compatibility check.
 * The second is the VF state. This includes the required MMIO regions and
 * the shadow states maintained by the QAT PF driver. This part can only be
 * saved when the VF is fully quiesced, and is migrated during the stop-copy
 * stage.
 * Both parts of the data are saved in hierarchical structures consisting of
 * a preamble section and several raw state sections.
 * When the pre-configured part of the migration data has been fully
 * retrieved from user space, the preamble section is used to validate the
 * correctness of the data blocks and check the version compatibility. The
 * raw state sections are then used to perform a device compatibility check.
 * When the device transitions out of the RESUMING state, the VF states are
 * extracted from the raw state sections of the VF state part of the
 * migration data and then loaded into the device.
 */
38127131Snectar
/*
 * Per-fd context for one migration data stream (saving or resuming).
 * Allocated by the save/resume fd creation paths and freed by
 * qat_vf_release_file() when the last file reference is dropped.
 */
struct qat_vf_migration_file {
	struct file *filp;	/* anon inode file handed to user space */
	/* protects migration region context */
	struct mutex lock;
	bool disabled;		/* fd torn down; subsequent ops return -ENODEV */
	struct qat_vf_core_device *qat_vdev;	/* owning VF device */
	ssize_t filled_size;	/* bytes of valid migration data staged so far */
};
47215698Ssimon
/*
 * Driver-private device structure wrapping the vfio-pci core device with
 * the QAT live-migration context.
 */
struct qat_vf_core_device {
	struct vfio_pci_core_device core_device;
	struct qat_mig_dev *mdev;	/* migration handle from the QAT PF driver */
	/* protects migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	struct qat_vf_migration_file *resuming_migf;	/* active resume fd, if any */
	struct qat_vf_migration_file *saving_migf;	/* active save fd, if any */
};
57276864Sjkim
58276864Sjkimstatic int qat_vf_pci_open_device(struct vfio_device *core_vdev)
59276864Sjkim{
60276864Sjkim	struct qat_vf_core_device *qat_vdev =
61276864Sjkim		container_of(core_vdev, struct qat_vf_core_device,
62276864Sjkim			     core_device.vdev);
63215698Ssimon	struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
64276864Sjkim	int ret;
65276864Sjkim
66276864Sjkim	ret = vfio_pci_core_enable(vdev);
67276864Sjkim	if (ret)
68276864Sjkim		return ret;
69215698Ssimon
70276864Sjkim	ret = qat_vfmig_open(qat_vdev->mdev);
71127131Snectar	if (ret) {
72127131Snectar		vfio_pci_core_disable(vdev);
73127131Snectar		return ret;
74127131Snectar	}
75127131Snectar	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
76127131Snectar
77127131Snectar	vfio_pci_core_finish_enable(vdev);
78127131Snectar
79127131Snectar	return 0;
80127131Snectar}
81127131Snectar
82127131Snectarstatic void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
83127131Snectar{
84127131Snectar	mutex_lock(&migf->lock);
85127131Snectar	migf->disabled = true;
86127131Snectar	migf->filp->f_pos = 0;
87127131Snectar	migf->filled_size = 0;
88127131Snectar	mutex_unlock(&migf->lock);
89127131Snectar}
90127131Snectar
91127131Snectarstatic void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
92127131Snectar{
93127131Snectar	if (qat_vdev->resuming_migf) {
94127131Snectar		qat_vf_disable_fd(qat_vdev->resuming_migf);
95127131Snectar		fput(qat_vdev->resuming_migf->filp);
96127131Snectar		qat_vdev->resuming_migf = NULL;
97127131Snectar	}
98127131Snectar
99127131Snectar	if (qat_vdev->saving_migf) {
100127131Snectar		qat_vf_disable_fd(qat_vdev->saving_migf);
101127131Snectar		fput(qat_vdev->saving_migf->filp);
102127131Snectar		qat_vdev->saving_migf = NULL;
103127131Snectar	}
104127131Snectar}
105127131Snectar
/*
 * vfio close_device callback: close the QAT migration channel and release
 * any open migration fds before disabling the vfio-pci core device.
 */
static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_close(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
	vfio_pci_core_close_device(core_vdev);
}
115127131Snectar
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler for the saving fd.
 *
 * Reports how many bytes of the pre-configured (setup) data remain to be
 * read at the current file position. Only valid while the device is in one
 * of the PRE_COPY states. dirty_bytes is always 0 since the setup data does
 * not change once captured.
 */
static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_vf_core_device *qat_vdev = migf->qat_vdev;
	struct qat_mig_dev *mig_dev = qat_vdev->mdev;
	struct vfio_precopy_info info;
	loff_t *pos = &filp->f_pos;
	unsigned long minsz;
	int ret = 0;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	/* Lock ordering: device state lock first, then the per-fd lock. */
	mutex_lock(&qat_vdev->state_mutex);
	if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
	    qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		mutex_unlock(&qat_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	/* f_pos past the captured setup data indicates a corrupted stream. */
	if (*pos > mig_dev->setup_size) {
		ret = -EINVAL;
		goto out;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = mig_dev->setup_size - *pos;

out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&qat_vdev->state_mutex);
	if (ret)
		return ret;
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
165127131Snectar
/*
 * ->read for the saving fd: copy up to @len bytes of the staged migration
 * data from the PF-managed state buffer to user space, advancing f_pos.
 * Returns the number of bytes copied, 0 at end of data, or a negative errno.
 */
static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
				size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	ssize_t done = 0;
	loff_t *offs;
	int ret;

	/* Stream semantics: explicit offsets (pread) are rejected. */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*offs > migf->filled_size || *offs < 0) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	/* Clamp to the remaining valid data; a zero-length read returns 0. */
	len = min_t(size_t, migf->filled_size - *offs, len);
	if (len) {
		ret = copy_to_user(buf, mig_dev->state + *offs, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*offs += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
205280304Sjkim
/*
 * ->release for both the save and resume fds: invalidate the migration file
 * and free its context once the last file reference is gone.
 */
static int qat_vf_release_file(struct inode *inode, struct file *filp)
{
	struct qat_vf_migration_file *migf = filp->private_data;

	qat_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}
216280304Sjkim
/* File operations for the saving (source side) migration fd. */
static const struct file_operations qat_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = qat_vf_save_read,
	.unlocked_ioctl = qat_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = qat_vf_release_file,
	.llseek = no_llseek,
};
225280304Sjkim
226280304Sjkimstatic int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
227280304Sjkim			     struct qat_vf_migration_file *migf)
228280304Sjkim{
229280304Sjkim	int ret;
230280304Sjkim
231280304Sjkim	ret = qat_vfmig_save_state(qat_vdev->mdev);
232280304Sjkim	if (ret)
233280304Sjkim		return ret;
234280304Sjkim	migf->filled_size = qat_vdev->mdev->state_size;
235280304Sjkim
236280304Sjkim	return 0;
237280304Sjkim}
238280304Sjkim
239280304Sjkimstatic int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev,
240280304Sjkim			     struct qat_vf_migration_file *migf)
241280304Sjkim{
242280304Sjkim	int ret;
243280304Sjkim
244280304Sjkim	ret = qat_vfmig_save_setup(qat_vdev->mdev);
245280304Sjkim	if (ret)
246280304Sjkim		return ret;
247280304Sjkim	migf->filled_size = qat_vdev->mdev->setup_size;
248280304Sjkim
249280304Sjkim	return 0;
250280304Sjkim}
251280304Sjkim
/*
 * Allocate a file handler for user space and then save the migration data for
 * the device being migrated. If this is called in the pre-copy stage, save the
 * pre-configured device data. Otherwise, if this is called in the stop-copy
 * stage, save the device state. In both cases, update the data size which can
 * then be read from user space.
 */
static struct qat_vf_migration_file *
qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy)
{
	struct qat_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
					migf, O_RDONLY);
	ret = PTR_ERR_OR_ZERO(migf->filp);
	if (ret) {
		kfree(migf);
		return ERR_PTR(ret);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	if (pre_copy)
		ret = qat_vf_save_setup(qat_vdev, migf);
	else
		ret = qat_vf_save_state(qat_vdev, migf);
	if (ret) {
		/*
		 * Dropping the only file reference triggers
		 * qat_vf_release_file(), which frees migf — no kfree() here.
		 */
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	migf->qat_vdev = qat_vdev;

	return migf;
}
293
/*
 * ->write for the resuming fd: stage up to @len bytes of the migration data
 * stream from user space into the PF-managed state buffer, then attempt an
 * incremental compatibility check on what has been received so far.
 * Returns the number of bytes accepted or a negative errno.
 */
static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
				   size_t len, loff_t *pos)
{
	struct qat_vf_migration_file *migf = filp->private_data;
	struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
	loff_t end, *offs;
	ssize_t done = 0;
	int ret;

	/* Stream semantics: explicit offsets (pwrite) are rejected. */
	if (pos)
		return -ESPIPE;
	offs = &filp->f_pos;

	/* Reject negative positions and len + f_pos overflow up front. */
	if (*offs < 0 ||
	    check_add_overflow((loff_t)len, *offs, &end))
		return -EOVERFLOW;

	/* The incoming stream must fit in the device state buffer. */
	if (end > mig_dev->state_size)
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(mig_dev->state + *offs, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*offs += len;
	migf->filled_size += len;

	/*
	 * Load the pre-configured device data first to check if the target
	 * device is compatible with the source device.
	 */
	ret = qat_vfmig_load_setup(mig_dev, migf->filled_size);
	if (ret && ret != -EAGAIN) {
		done = ret;
		goto out_unlock;
	}
	done = len;

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}
343
/* File operations for the resuming (destination side) migration fd. */
static const struct file_operations qat_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = qat_vf_resume_write,
	.release = qat_vf_release_file,
	.llseek = no_llseek,
};
350
351static struct qat_vf_migration_file *
352qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
353{
354	struct qat_vf_migration_file *migf;
355	int ret;
356
357	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
358	if (!migf)
359		return ERR_PTR(-ENOMEM);
360
361	migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY);
362	ret = PTR_ERR_OR_ZERO(migf->filp);
363	if (ret) {
364		kfree(migf);
365		return ERR_PTR(ret);
366	}
367
368	migf->qat_vdev = qat_vdev;
369	migf->filled_size = 0;
370	stream_open(migf->filp->f_inode, migf->filp);
371	mutex_init(&migf->lock);
372
373	return migf;
374}
375
/* Commit the fully staged migration data to the VF via the PF driver. */
static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
{
	return qat_vfmig_load_state(qat_vdev->mdev);
}
380
/*
 * Execute one arc of the migration state machine from the current state to
 * @new. Returns a migration fd for arcs that open a data stream, NULL for
 * arcs without one, or an ERR_PTR on failure. Called with
 * qat_vdev->state_mutex held (see qat_vf_pci_set_device_state()).
 */
static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
{
	u32 cur = qat_vdev->mig_state;
	int ret;

	/*
	 * As the device is not capable of just stopping P2P DMAs, suspend the
	 * device completely once any of the P2P states are reached.
	 * When it is suspended, all its MMIO registers can still be operated
	 * correctly, jobs submitted through ring are queued while no jobs are
	 * processed by the device. The MMIO states can be safely migrated to
	 * the target VF during stop-copy stage and restored correctly in the
	 * target VF. All queued jobs can be resumed then.
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		ret = qat_vfmig_suspend(qat_vdev->mdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* Leaving a P2P state: resume job processing on the device. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
		qat_vfmig_resume(qat_vdev->mdev);
		return NULL;
	}

	/* RUNNING_P2P <-> STOP requires no device action. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	/* Stop-copy: capture the full VF state and hand out the saving fd. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/* Resume: hand out the fd that user space writes migration data to. */
	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_resume_device_data(qat_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->resuming_migf = migf;
		return migf->filp;
	}

	/* Arcs that abort an in-progress migration: drop any open fds. */
	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* Pre-copy: capture the setup data and hand out the saving fd. */
	if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
		struct qat_vf_migration_file *migf;

		migf = qat_vf_save_device_data(qat_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		qat_vdev->saving_migf = migf;
		return migf->filp;
	}

	/* Pre-copy -> stop-copy: append the VF state to the existing fd. */
	if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct qat_vf_migration_file *migf = qat_vdev->saving_migf;

		if (!migf)
			return ERR_PTR(-EINVAL);
		ret = qat_vf_save_state(qat_vdev, migf);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	/* Resume complete: load the staged data, then release the fds. */
	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = qat_vf_load_device_data(qat_vdev);
		if (ret)
			return ERR_PTR(ret);

		qat_vf_disable_fds(qat_vdev);
		return NULL;
	}

	/* vfio_mig_get_next_state() does not use arcs other than the above */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
478
/*
 * Restore the migration context after a device reset: the VF is back in
 * RUNNING, the PF-side migration state is reset and any open migration fds
 * are invalidated. Caller holds qat_vdev->state_mutex.
 */
static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev)
{
	qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	qat_vfmig_reset(qat_vdev->mdev);
	qat_vf_disable_fds(qat_vdev);
}
485
/*
 * vfio migration_set_state callback: walk the sequence of arcs computed by
 * vfio_mig_get_next_state() from the current state to @new_state, stepping
 * the device one arc at a time under the state lock. Returns the fd
 * produced by the stepping (if any), NULL, or an ERR_PTR. If a step fails
 * part way, the device is left in the last state successfully reached.
 */
static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
						enum vfio_device_mig_state new_state)
{
	struct qat_vf_core_device *qat_vdev = container_of(vdev,
			struct qat_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&qat_vdev->state_mutex);
	while (new_state != qat_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(ret);
			break;
		}
		res = qat_vf_pci_step_device_state(qat_vdev, next_state);
		if (IS_ERR(res))
			break;
		qat_vdev->mig_state = next_state;
		/* An fd may only be produced by the final arc of the walk. */
		if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&qat_vdev->state_mutex);

	return res;
}
517
518static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
519				       enum vfio_device_mig_state *curr_state)
520{
521	struct qat_vf_core_device *qat_vdev = container_of(vdev,
522			struct qat_vf_core_device, core_device.vdev);
523
524	mutex_lock(&qat_vdev->state_mutex);
525	*curr_state = qat_vdev->mig_state;
526	mutex_unlock(&qat_vdev->state_mutex);
527
528	return 0;
529}
530
531static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
532				    unsigned long *stop_copy_length)
533{
534	struct qat_vf_core_device *qat_vdev = container_of(vdev,
535			struct qat_vf_core_device, core_device.vdev);
536
537	mutex_lock(&qat_vdev->state_mutex);
538	*stop_copy_length = qat_vdev->mdev->state_size;
539	mutex_unlock(&qat_vdev->state_mutex);
540
541	return 0;
542}
543
/* vfio live-migration callbacks registered via core_vdev->mig_ops. */
static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
	.migration_set_state = qat_vf_pci_set_device_state,
	.migration_get_state = qat_vf_pci_get_device_state,
	.migration_get_data_size = qat_vf_pci_get_data_size,
};
549
/*
 * vfio release callback: undo qat_vf_pci_init_dev() — tear down the QAT
 * migration device, then release the vfio-pci core device.
 */
static void qat_vf_pci_release_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);

	qat_vfmig_cleanup(qat_vdev->mdev);
	qat_vfmig_destroy(qat_vdev->mdev);
	mutex_destroy(&qat_vdev->state_mutex);
	vfio_pci_core_release_dev(core_vdev);
}
560
/*
 * vfio init callback: advertise the supported migration features, set up
 * the vfio-pci core device and create/initialize the QAT migration device
 * for this VF via its parent PF.
 */
static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
{
	struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
			struct qat_vf_core_device, core_device.vdev);
	struct qat_mig_dev *mdev;
	struct pci_dev *parent;
	int ret, vf_id;

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P |
				     VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &qat_vf_pci_mig_ops;

	ret = vfio_pci_core_init_dev(core_vdev);
	if (ret)
		return ret;

	mutex_init(&qat_vdev->state_mutex);

	/* The migration channel is managed by the PF on behalf of the VF. */
	parent = pci_physfn(qat_vdev->core_device.pdev);
	vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev);
	if (vf_id < 0) {
		ret = -ENODEV;
		goto err_rel;
	}

	mdev = qat_vfmig_create(parent, vf_id);
	if (IS_ERR(mdev)) {
		ret = PTR_ERR(mdev);
		goto err_rel;
	}

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto err_destroy;

	qat_vdev->mdev = mdev;

	return 0;

err_destroy:
	qat_vfmig_destroy(mdev);
err_rel:
	vfio_pci_core_release_dev(core_vdev);
	return ret;
}
606
/*
 * vfio device callbacks: migration-aware open/close/init/release are
 * provided by this driver; the rest is delegated to vfio-pci core and the
 * generic iommufd physical-device helpers.
 */
static const struct vfio_device_ops qat_vf_pci_ops = {
	.name = "qat-vf-vfio-pci",
	.init = qat_vf_pci_init_dev,
	.release = qat_vf_pci_release_dev,
	.open_device = qat_vf_pci_open_device,
	.close_device = qat_vf_pci_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};
624
625static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
626{
627	struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
628
629	return container_of(core_device, struct qat_vf_core_device, core_device);
630}
631
632static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
633{
634	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
635
636	if (!qat_vdev->mdev)
637		return;
638
639	mutex_lock(&qat_vdev->state_mutex);
640	qat_vf_reset_done(qat_vdev);
641	mutex_unlock(&qat_vdev->state_mutex);
642}
643
644static int
645qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
646{
647	struct device *dev = &pdev->dev;
648	struct qat_vf_core_device *qat_vdev;
649	int ret;
650
651	qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
652	if (IS_ERR(qat_vdev))
653		return PTR_ERR(qat_vdev);
654
655	pci_set_drvdata(pdev, &qat_vdev->core_device);
656	ret = vfio_pci_core_register_device(&qat_vdev->core_device);
657	if (ret)
658		goto out_put_device;
659
660	return 0;
661
662out_put_device:
663	vfio_put_device(&qat_vdev->core_device.vdev);
664	return ret;
665}
666
/* PCI remove: unregister from vfio-pci core and drop the device reference. */
static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
{
	struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);

	vfio_pci_core_unregister_device(&qat_vdev->core_device);
	vfio_put_device(&qat_vdev->core_device.vdev);
}
674
/* PCI IDs this variant driver binds to (driver-override match only). */
static const struct pci_device_id qat_vf_vfio_pci_table[] = {
	/* Intel QAT GEN4 4xxx VF device */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
	{}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
683
/* PCI AER handlers: migration-aware reset_done, generic error detection. */
static const struct pci_error_handlers qat_vf_err_handlers = {
	.reset_done = qat_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};
688
static struct pci_driver qat_vf_vfio_pci_driver = {
	.name = "qat_vfio_pci",
	.id_table = qat_vf_vfio_pci_table,
	.probe = qat_vf_vfio_pci_probe,
	.remove = qat_vf_vfio_pci_remove,
	.err_handler = &qat_vf_err_handlers,
	/* vfio drivers perform their own DMA mapping for bound devices. */
	.driver_managed_dma = true,
};
module_pci_driver(qat_vf_vfio_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
MODULE_IMPORT_NS(CRYPTO_QAT);
703