// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}

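/*
 * Interrupt callback for the control virtqueue: drain every completed
 * control request and signal the submitter waiting on its completion.
 * The ctrl_lock is dropped around each per-request callback.
 */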
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

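/*
 * Queue a control request on the control virtqueue, kick the device and
 * wait for it to complete.  wait_for_completion() may sleep, so callers
 * must be in process context.
 */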
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
		unsigned int out_sgs, unsigned int in_sgs,
		struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	int err;
	unsigned long flags;

	init_completion(&vc_ctrl_req->compl);

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
		return err;
	}

	virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	wait_for_completion(&vc_ctrl_req->compl);

	return 0;
}

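/*
 * Tasklet bottom half for a data virtqueue: pull every finished request
 * off the ring and run its per-algorithm completion callback.
 */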
static void virtcrypto_done_task(unsigned long data)
{
	struct data_queue *data_vq = (struct data_queue *)data;
	struct virtqueue *vq = data_vq->vq;
	struct virtio_crypto_request *vc_req;
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
		}
	} while (!virtqueue_enable_cb(vq));
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct data_queue *dq = &vcrypto->data_vq[vq->index];

	tasklet_schedule(&dq->done_task);
}

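/*
 * Discover the control and data virtqueues and set up a crypto engine
 * and a completion tasklet for each data queue.
 */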
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect one data virtqueue, possibly followed by N-1 further
	 * data queues used in multiqueue mode, followed by the control vq.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue */
	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
	names[total_vqs - 1] = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		callbacks[i] = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
				"dataq.%d", i);
		names[i] = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
						virtqueue_get_vring_size(vqs[i]));
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}
		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
				(unsigned long)&vi->data_vq[i]);
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_engine:
err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
				GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

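/*
 * Clear any virtqueue CPU affinity hints set earlier.  @hcpu is currently
 * unused; callers pass -1.
 */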
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

		vi->affinity_hint_set = false;
	}
}

static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we let each queue be private to one CPU by
	 * setting the affinity hint, which eliminates contention.
	 *
	 * TODO: add CPU hotplug support by registering a CPU notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate the data queue array */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

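/*
 * Re-read the status field from the device config space and start or
 * stop the accelerator accordingly.  Any status bit we do not recognize
 * marks the device as broken.
 */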
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
				"Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

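/*
 * Start one crypto engine per data queue, unwinding the engines already
 * started if a later one fails.
 */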
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

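/* Deferred handler for config-change notifications from the device. */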
static void vcrypto_config_changed_work(struct work_struct *work)
{
	struct virtio_crypto *vcrypto =
		container_of(work, struct virtio_crypto, config_work);

	virtcrypto_update_status(vcrypto);
}

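/*
 * Probe: read the device configuration, register the device with the
 * global device manager, bring up the virtqueues and crypto engines and,
 * if the host reports the accelerator ready, start it.
 */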
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;
	u32 cipher_algo_l = 0;
	u32 cipher_algo_h = 0;
	u32 hash_algo = 0;
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
					dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
		virtio_cread_le(vdev, struct virtio_crypto_config,
				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use single data queue as default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;
	vcrypto->crypto_services = crypto_services;
	vcrypto->cipher_algo_l = cipher_algo_l;
	vcrypto->cipher_algo_h = cipher_algo_h;
	vcrypto->mac_algo_l = mac_algo_l;
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		vcrypto->max_data_queues,
		vcrypto->max_cipher_key_len,
		vcrypto->max_auth_key_len,
		vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

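/*
 * Reclaim requests still queued on the data virtqueues.  Only called
 * after the device has been reset, so no further completions can race
 * with the teardown.
 */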
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
			kfree(vc_req->req_data);
			kfree(vc_req->sgs);
		}
		cond_resched();
	}
}

static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	flush_work(&vcrypto->config_work);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	schedule_work(&vcrypto->config_work);
}

#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	flush_work(&vcrypto->config_work);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static const unsigned int features[] = {
	/* none */
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name         = KBUILD_MODNAME,
	.driver.owner        = THIS_MODULE,
	.feature_table       = features,
	.feature_table_size  = ARRAY_SIZE(features),
	.id_table            = id_table,
	.probe               = virtcrypto_probe,
	.remove              = virtcrypto_remove,
	.config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze              = virtcrypto_freeze,
	.restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");