1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
4 */
5
6#include <linux/virtio_pci_admin.h>
7#include "virtio_pci_common.h"
8
9/*
10 * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
11 * commands are supported
12 * @dev: VF pci_dev
13 *
14 * Returns true on success.
15 */
16bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
17{
18	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
19	struct virtio_pci_device *vp_dev;
20
21	if (!virtio_dev)
22		return false;
23
24	if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
25		return false;
26
27	vp_dev = to_vp_device(virtio_dev);
28
29	if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
30		VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
31		return true;
32	return false;
33}
34EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
35
36static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
37					    u8 offset, u8 size, u8 *buf)
38{
39	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
40	struct virtio_admin_cmd_legacy_wr_data *data;
41	struct virtio_admin_cmd cmd = {};
42	struct scatterlist data_sg;
43	int vf_id;
44	int ret;
45
46	if (!virtio_dev)
47		return -ENODEV;
48
49	vf_id = pci_iov_vf_id(pdev);
50	if (vf_id < 0)
51		return vf_id;
52
53	data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
54	if (!data)
55		return -ENOMEM;
56
57	data->offset = offset;
58	memcpy(data->registers, buf, size);
59	sg_init_one(&data_sg, data, sizeof(*data) + size);
60	cmd.opcode = cpu_to_le16(opcode);
61	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
62	cmd.group_member_id = cpu_to_le64(vf_id + 1);
63	cmd.data_sg = &data_sg;
64	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
65
66	kfree(data);
67	return ret;
68}
69
/*
 * virtio_pci_admin_legacy_common_io_write - Write legacy common configuration
 * of a member device
 * @pdev: VF pci_dev
 * @offset: starting byte offset within the common configuration area to write to
 * @size: size of the data to write
 * @buf: buffer which holds the data
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
					    u8 size, u8 *buf)
{
	return virtio_pci_admin_legacy_io_write(pdev,
					VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
					offset, size, buf);
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
89
/*
 * virtio_pci_admin_legacy_device_io_write - Write legacy device configuration
 * of a member device
 * @pdev: VF pci_dev
 * @offset: starting byte offset within the device configuration area to write to
 * @size: size of the data to write
 * @buf: buffer which holds the data
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
					    u8 size, u8 *buf)
{
	return virtio_pci_admin_legacy_io_write(pdev,
					VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
					offset, size, buf);
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
109
110static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
111					   u8 offset, u8 size, u8 *buf)
112{
113	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
114	struct virtio_admin_cmd_legacy_rd_data *data;
115	struct scatterlist data_sg, result_sg;
116	struct virtio_admin_cmd cmd = {};
117	int vf_id;
118	int ret;
119
120	if (!virtio_dev)
121		return -ENODEV;
122
123	vf_id = pci_iov_vf_id(pdev);
124	if (vf_id < 0)
125		return vf_id;
126
127	data = kzalloc(sizeof(*data), GFP_KERNEL);
128	if (!data)
129		return -ENOMEM;
130
131	data->offset = offset;
132	sg_init_one(&data_sg, data, sizeof(*data));
133	sg_init_one(&result_sg, buf, size);
134	cmd.opcode = cpu_to_le16(opcode);
135	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
136	cmd.group_member_id = cpu_to_le64(vf_id + 1);
137	cmd.data_sg = &data_sg;
138	cmd.result_sg = &result_sg;
139	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
140
141	kfree(data);
142	return ret;
143}
144
/*
 * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
 * a member device
 * @pdev: VF pci_dev
 * @offset: starting byte offset within the device configuration area to read from
 * @size: size of the data to be read
 * @buf: buffer to hold the returned data
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
					   u8 size, u8 *buf)
{
	return virtio_pci_admin_legacy_io_read(pdev,
					VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
					offset, size, buf);
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
164
/*
 * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
 * a member device
 * @pdev: VF pci_dev
 * @offset: starting byte offset within the common configuration area to read from
 * @size: size of the data to be read
 * @buf: buffer to hold the returned data
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
					   u8 size, u8 *buf)
{
	return virtio_pci_admin_legacy_io_read(pdev,
					VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
					offset, size, buf);
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
184
185/*
186 * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
187 * information for legacy interface
188 * @dev: VF pci_dev
189 * @req_bar_flags: requested bar flags
190 * @bar: on output the BAR number of the owner or member device
191 * @bar_offset: on output the offset within bar
192 *
193 * Returns 0 on success, or negative on failure.
194 */
195int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
196					   u8 req_bar_flags, u8 *bar,
197					   u64 *bar_offset)
198{
199	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
200	struct virtio_admin_cmd_notify_info_result *result;
201	struct virtio_admin_cmd cmd = {};
202	struct scatterlist result_sg;
203	int vf_id;
204	int ret;
205
206	if (!virtio_dev)
207		return -ENODEV;
208
209	vf_id = pci_iov_vf_id(pdev);
210	if (vf_id < 0)
211		return vf_id;
212
213	result = kzalloc(sizeof(*result), GFP_KERNEL);
214	if (!result)
215		return -ENOMEM;
216
217	sg_init_one(&result_sg, result, sizeof(*result));
218	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
219	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
220	cmd.group_member_id = cpu_to_le64(vf_id + 1);
221	cmd.result_sg = &result_sg;
222	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
223	if (!ret) {
224		struct virtio_admin_cmd_notify_info_data *entry;
225		int i;
226
227		ret = -ENOENT;
228		for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
229			entry = &result->entries[i];
230			if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
231				break;
232			if (entry->flags != req_bar_flags)
233				continue;
234			*bar = entry->bar;
235			*bar_offset = le64_to_cpu(entry->offset);
236			ret = 0;
237			break;
238		}
239	}
240
241	kfree(result);
242	return ret;
243}
244EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);
245