// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC      0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

/**
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
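	/* Drop the pseudo-fs reference taken by ocxlflash_getfile() */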
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}

/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file_pseudo failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	struct xive_irq_data *xd;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
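	/*
	 * The hwirq was obtained from ocxl_link_irq_alloc(); map it in the
	 * default IRQ domain (XIVE on POWER9) to get a virtual IRQ number.
	 */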
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

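	/*
	 * Cache the XIVE trigger page address; MMIO writes to this page
	 * raise the interrupt. It is exported to the caller via
	 * ocxlflash_get_irq_objhndl().
	 */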
	xd = irq_get_handler_data(virq);
	if (unlikely(!xd)) {
		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
		rc = -ENXIO;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = xd->trig_mmio;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	afu_unmap_irq(0, ctx_cookie, num, cookie);
}

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

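	/*
	 * Add the process element to the link's SPA (shared process area)
	 * so the XSL/NPU can translate addresses for this context.
	 */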
	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
			      pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
			      ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 in all cases, until the reset is implemented
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the pointer to host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

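	/* The IDR index doubles as the context's process element (PASID) */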
	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so the acTag base is shared */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
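	/* The PASID count bounds the IDR range used to allocate context PEs */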
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

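	/*
	 * Deliver a single event per read; a pending AFU interrupt is
	 * reported ahead of a pending translation fault.
	 */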
	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}

/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: 0 on success, -errno on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

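	/* Fault in a single page of the context's PSA MMIO space */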
	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)

/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned back.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: Always returns IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

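	/*
	 * Bit positions in irq_bitmap are 0-based per-context interrupt
	 * numbers; afu_read() reports them to userspace 1-based.
	 */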
	spin_lock(&ctx->slock);
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}

/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

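/*
 * Illustrative sketch only (not the exact call sequence of the cxlflash
 * core): a host driver holding a struct pci_dev *pdev would typically
 * drive this backend as follows.
 *
 *	const struct cxlflash_backend_ops *ops = &cxlflash_ocxl_ops;
 *	void *afu = ops->create_afu(pdev);	(configure fn/AFU, map MMIO)
 *	void *ctx = ops->get_context(pdev, afu);	(default master context)
 *	int rc = ops->start_context(ctx);	(add PE, enable translation)
 *	...
 *	rc = ops->stop_context(ctx);
 *	ops->destroy_afu(afu);
 */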
/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};