// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
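
/*
 * Illustrative sysfs usage for the attribute above (paths assume a DSA
 * device enumerated as dsa0; substitute the actual device and engine
 * names):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *
 * Writing a valid group id binds the engine to that group; writing -1
 * detaches it.
 */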

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

const struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}
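
/*
 * Worked example of the accounting above (numbers are illustrative):
 * with max_rdbufs = 96 and two groups each reserving 8 read buffers,
 * nr_rdbufs becomes 96 - 16 = 80, i.e. the shared pool left over for
 * groups without reservations.
 */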

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
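
/*
 * Illustrative usage for the traffic class attributes above (assuming
 * device dsa0, group 0). Valid values are 0-7, and on hardware with
 * version <= DEVICE_VERSION_2 the write is only honored when the driver
 * was loaded with the tc_override module parameter:
 *
 *	echo 1 > /sys/bus/dsa/devices/dsa0/group0.0/traffic_class_a
 */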

static ssize_t group_desc_progress_limit_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}

static ssize_t group_desc_progress_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->desc_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_desc_progress_limit =
		__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
		       group_desc_progress_limit_store);

static ssize_t group_batch_progress_limit_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}

static ssize_t group_batch_progress_limit_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->batch_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_batch_progress_limit =
		__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
		       group_batch_progress_limit_store);
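
/*
 * The two progress limit stores above accept only the low two bits
 * (GENMASK(1, 0)), i.e. values 0-3; e.g. writing 4 is rejected with
 * -EINVAL while 0-3 are stored as-is.
 */
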
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	&dev_attr_group_desc_progress_limit.attr,
	&dev_attr_group_batch_progress_limit.attr,
	NULL,
};

static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
						     struct idxd_device *idxd)
{
	return (attr == &dev_attr_group_desc_progress_limit.attr ||
		attr == &dev_attr_group_batch_progress_limit.attr) &&
		!idxd->hw.group_cap.progress_limit;
}

static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
						   struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_group_use_token_limit.attr ||
		attr == &dev_attr_group_use_read_buffer_limit.attr ||
		attr == &dev_attr_group_tokens_allowed.attr ||
		attr == &dev_attr_group_read_buffers_allowed.attr ||
		attr == &dev_attr_group_tokens_reserved.attr ||
		attr == &dev_attr_group_read_buffers_reserved.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_group_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;

	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
		return 0;

	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
	.is_visible = idxd_group_attr_visible,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

const struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
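
/*
 * Illustrative usage (assuming device dsa0, wq 0):
 *
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo shared > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *
 * Note that switching to dedicated mode also clears the shared-wq
 * threshold.
 */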

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}
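
/*
 * Example of the check in wq_size_store() below (illustrative numbers):
 * with max_wq_size = 128 and four wqs currently sized 32 each, growing
 * one wq to 64 gives 64 + 128 - 32 = 160 > 128 and is rejected.
 */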

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof) {
		if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
			return -EOPNOTSUPP;

		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	}

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
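
/*
 * Illustrative usage (assuming device dsa0, wq 0). Block on fault can
 * only be enabled while PRS is enabled for the wq, i.e. prs_disable
 * (further down) is 0:
 *
 *	echo 1 > /sys/bus/dsa/devices/dsa0/wq0.0/block_on_fault
 */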

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
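
/*
 * Note that __get_sysfs_u64() rounds the input up to a power of two,
 * so e.g. writing 100 stores 128; zero is rejected with -EINVAL.
 */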

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	if (ats_dis)
		set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	else
		clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
}

static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool prs_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &prs_dis);
	if (rc < 0)
		return rc;

	if (prs_dis) {
		set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
		/* when PRS is disabled, BOF needs to be off as well */
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
	}
	return count;
}

static struct device_attribute dev_attr_wq_prs_disable =
		__ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static ssize_t wq_op_config_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
}

static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
	int bit;

	/*
	 * The OPCAP is defined as 256 bits, one per operation the device
	 * supports. Iterate through all the bits of the input mask and
	 * fail if any bit is set that is not also set in the device's
	 * OPCAP.
	 */
	for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
		if (!test_bit(bit, idxd->opcap_bmap))
			return -EINVAL;
	}

	return 0;
}
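
/*
 * Illustrative op_config usage (assuming device dsa0, wq 0). The store
 * below parses the buffer with bitmap_parse(), i.e. comma-separated
 * 32-bit hex words with the most significant word first; a 256-bit mask
 * enabling only operations 0 and 1 could be written as:
 *
 *	echo 0,0,0,0,0,0,0,3 > /sys/bus/dsa/devices/dsa0/wq0.0/op_config
 */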

static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned long *opmask;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
	if (!opmask)
		return -ENOMEM;

	rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
	if (rc < 0)
		goto err;

	rc = idxd_verify_supported_opcap(idxd, opmask);
	if (rc < 0)
		goto err;

	bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);

	bitmap_free(opmask);
	return count;

err:
	bitmap_free(opmask);
	return rc;
}

static struct device_attribute dev_attr_wq_op_config =
		__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);

static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->driver_name);
}

static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
	sprintf(wq->driver_name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_driver_name =
		__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_prs_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,
	&dev_attr_wq_driver_name.attr,
	NULL,
};

/* A WQ attr is invisible if the feature is not supported in WQCAP. */
#define idxd_wq_attr_invisible(name, cap_field, a, idxd)		\
	((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
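
/*
 * For example, idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr,
 * idxd) expands to a check that hides the ats_disable attribute when
 * WQCAP lacks wq_ats_support.
 */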

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_wq_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
		return 0;

	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
	.is_visible = idxd_wq_attr_visible,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	xa_destroy(&wq->upasid_xa);
	kfree(wq);
}

const struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	DECLARE_BITMAP(swerr_bmap, 256);

	bitmap_zero(swerr_bmap, 256);
	spin_lock(&idxd->dev_lock);
	multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
	spin_unlock(&idxd->dev_lock);
	return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static ssize_t iaa_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd->hw.version < DEVICE_VERSION_2)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
}
static DEVICE_ATTR_RO(iaa_cap);

static ssize_t event_log_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (!idxd->evl)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", idxd->evl->size);
}

static ssize_t event_log_size_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	if (!idxd->evl)
		return -EOPNOTSUPP;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
	    (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
		return -EINVAL;

	idxd->evl->size = val;
	return count;
}
static DEVICE_ATTR_RW(event_log_size);
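
/*
 * Illustrative usage (assuming device dsa0, configurable and currently
 * disabled); values outside [IDXD_EVL_SIZE_MIN, IDXD_EVL_SIZE_MAX] are
 * rejected:
 *
 *	echo 64 > /sys/bus/dsa/devices/dsa0/event_log_size
 */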

static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
						    struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_max_tokens.attr ||
		attr == &dev_attr_max_read_buffers.attr ||
		attr == &dev_attr_token_limit.attr ||
		attr == &dev_attr_read_buffer_limit.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
					       struct idxd_device *idxd)
{
	return attr == &dev_attr_iaa_cap.attr &&
	       (idxd->data->type != IDXD_TYPE_IAX ||
	       idxd->hw.version < DEVICE_VERSION_2);
}

static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	return (attr == &dev_attr_event_log_size.attr &&
		!idxd->hw.gen_cap.evl_support);
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_event_log_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	&dev_attr_iaa_cap.attr,
	&dev_attr_event_log_size.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
	.is_visible = idxd_device_attr_visible,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->evl);
	kmem_cache_destroy(idxd->evl_cache);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
}

const struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

const struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* engines [0, i) were added; unwind all of them, including i - 1 */
	j = i;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* groups [0, i) were added; unwind all of them, including i - 1 */
	j = i;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* wqs [0, i) were added; unwind all of them, including i - 1 */
	j = i;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}