// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"
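
/**
 * ocxl_context_alloc() - Allocate and initialize an OpenCAPI context
 * @context: filled with a pointer to the new context on success
 * @afu: the AFU the context belongs to
 * @mapping: the address space of the file backing the context, used to
 *           tear down mmio mappings on forced detach
 *
 * Reserves a PASID on the AFU and takes a reference on the AFU itself,
 * which is dropped in ocxl_context_free().
 *
 * Returns 0 on success, -ENOMEM or the idr_alloc() error on failure.
 */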
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
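
/*
 * A minimal sketch of the expected call sequence from a client driver
 * (assuming the caller already holds an ocxl_afu, a mapping and an AMR
 * value; error handling omitted):
 *
 *	struct ocxl_context *ctx;
 *
 *	rc = ocxl_context_alloc(&ctx, afu, mapping);
 *	rc = ocxl_context_attach(ctx, amr, current->mm);
 *	...
 *	rc = ocxl_context_detach(ctx);
 *	if (rc != -EBUSY)
 *		ocxl_context_free(ctx);
 */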

/*
 * Callback for when a translation fault triggers an error
 * data:	a pointer to the context which triggered the fault
 * addr:	the address that triggered the error
 * dsisr:	the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}
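
/**
 * ocxl_context_attach() - Attach a context to an address space
 * @ctx: the context to attach, which must be in the OPENED state
 * @amr: the value of the PPC64 Authority Mask Register, to restrict access
 * @mm: the mm of the owning process, or NULL for a kernel context
 *
 * Adds a Process Element for the context to the link's SPA, so that the
 * AFU can start issuing address translations on behalf of the process.
 *
 * Returns 0 on success, -EIO if the context is not in the OPENED state,
 * or the ocxl_link_add_pe() error.
 */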
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;
	unsigned long pidr = 0;
	struct pci_dev *dev;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (mm)
		pidr = mm->context.id;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
			      amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);
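
/*
 * Map the trigger page of an AFU interrupt into userspace; writing to
 * the page raises the interrupt.
 */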
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}
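
/*
 * Map one page of the context's per-process mmio area. The context must
 * be attached, since the area is located with the context's PASID offset
 * within the AFU mmio range.
 */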
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}
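
/*
 * Page faults on the context mapping are resolved based on the offset:
 * below irq_base_offset lies the per-process mmio area, above it the
 * AFU interrupt trigger pages.
 */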
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};
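
/*
 * Validate an mmap request for an AFU interrupt: a single page, at a
 * valid interrupt offset, with write-only access.
 */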
static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* only one page */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* check offset validity */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * The trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	return 0;
}
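
/*
 * Validate an mmap request for the per-process mmio area: the range
 * must fit within the AFU's per-process mmio stride.
 */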
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}
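
/**
 * ocxl_context_mmap() - mmap the per-process mmio area or AFU interrupts
 * @ctx: the context backing the file being mapped
 * @vma: the vma describing the requested mapping
 *
 * Pages are not mapped eagerly; they are faulted in by ocxl_mmap_fault().
 *
 * Returns 0 on success, -EINVAL if the request is invalid.
 */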
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}
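
/**
 * ocxl_context_detach() - Detach a context from its address space
 * @ctx: the context to detach
 *
 * Asks the AFU to terminate the PASID, then removes the matching Process
 * Element from the SPA.
 *
 * Returns 0 on success, or -EBUSY if the AFU timed out terminating the
 * PASID, in which case the caller must not free the context (see below).
 */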
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we time out waiting for the AFU to terminate the
		 * PASID, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may still be
		 * referenced by the AFU, in which case we would
		 * checkstop because of an invalid PE access (FIR
		 * register 2, bit 42). So leave the PE defined. The
		 * caller shouldn't free the context, so that the
		 * PASID remains allocated.
		 *
		 * A link reset will be required to clean up the AFU
		 * and the SPA.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);
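
/*
 * Force-detach all contexts of an AFU and tear down their mmio mappings,
 * for example when the driver is unbound while the AFU is in use.
 */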
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. The easiest way to exercise
		 * this is to unbind and rebind the driver via sysfs
		 * while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
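
/**
 * ocxl_context_free() - Free a context and release its PASID
 * @ctx: the context to free
 *
 * Releases the context's AFU interrupts and drops the reference on the
 * AFU taken in ocxl_context_alloc(). Must not be called if
 * ocxl_context_detach() returned -EBUSY.
 */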
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);
	/* reference to the AFU taken in ocxl_context_alloc() */
	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);