Lines Matching refs:ctx

45 struct cxl_context *ctx;
80 if (!(ctx = cxl_context_alloc())) {
85 rc = cxl_context_init(ctx, afu, master);
89 cxl_context_set_mapping(ctx, inode->i_mapping);
91 pr_devel("afu_open pe: %i\n", ctx->pe);
92 file->private_data = ctx;
117 struct cxl_context *ctx = file->private_data;
120 __func__, ctx->pe);
121 cxl_context_detach(ctx);
128 if (!ctx->kernelapi) {
129 mutex_lock(&ctx->mapping_lock);
130 ctx->mapping = NULL;
131 mutex_unlock(&ctx->mapping_lock);
140 cxl_context_free(ctx);
145 static long afu_ioctl_start_work(struct cxl_context *ctx,
152 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
159 mutex_lock(&ctx->status_mutex);
160 if (ctx->status != OPENED) {
177 work.num_interrupts = ctx->afu->pp_irqs;
178 else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
179 (work.num_interrupts > ctx->afu->irqs_max)) {
184 if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
191 ctx->assign_tidr = true;
193 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
199 rc = cxl_adapter_context_get(ctx->afu->adapter);
201 afu_release_irqs(ctx, ctx);
214 ctx->pid = get_task_pid(current, PIDTYPE_PID);
217 ctx->mm = get_task_mm(current);
220 cxl_context_mm_count_get(ctx);
222 if (ctx->mm) {
224 mmput(ctx->mm);
226 mm_context_add_copro(ctx->mm);
249 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
251 if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
253 afu_release_irqs(ctx, ctx);
254 cxl_adapter_context_put(ctx->afu->adapter);
255 put_pid(ctx->pid);
256 ctx->pid = NULL;
258 cxl_context_mm_count_put(ctx);
259 if (ctx->mm)
260 mm_context_remove_copro(ctx->mm);
266 work.tid = ctx->tidr;
271 ctx->status = STARTED;
274 mutex_unlock(&ctx->status_mutex);
278 static long afu_ioctl_process_element(struct cxl_context *ctx,
281 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
283 if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
289 static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
294 afuid.card_id = ctx->afu->adapter->adapter_num;
295 afuid.afu_offset = ctx->afu->slice;
296 afuid.afu_mode = ctx->afu->current_mode;
299 if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
310 struct cxl_context *ctx = file->private_data;
312 if (ctx->status == CLOSED)
315 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
321 return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
323 return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
325 return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
339 struct cxl_context *ctx = file->private_data;
342 if (ctx->status != STARTED)
345 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
348 return cxl_context_iomap(ctx, vm);
351 static inline bool ctx_event_pending(struct cxl_context *ctx)
353 if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
356 if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
364 struct cxl_context *ctx = file->private_data;
369 poll_wait(file, &ctx->wq, poll);
371 pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
373 spin_lock_irqsave(&ctx->lock, flags);
374 if (ctx_event_pending(ctx))
376 else if (ctx->status == CLOSED)
380 spin_unlock_irqrestore(&ctx->lock, flags);
382 pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
387 static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
394 ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
401 ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
407 ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
414 ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
418 ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
425 struct cxl_context *ctx = file->private_data;
432 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
438 spin_lock_irqsave(&ctx->lock, flags);
441 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
442 if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
445 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
460 spin_unlock_irqrestore(&ctx->lock, flags);
464 spin_lock_irqsave(&ctx->lock, flags);
467 finish_wait(&ctx->wq, &wait);
470 event.header.process_element = ctx->pe;
472 if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
474 pl = ctx->afu_driver_ops->fetch_event(ctx);
475 atomic_dec(&ctx->afu_driver_events);
477 } else if (ctx->pending_irq) {
481 event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
482 clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
483 if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
484 ctx->pending_irq = false;
485 } else if (ctx->pending_fault) {
489 event.fault.addr = ctx->fault_addr;
490 event.fault.dsisr = ctx->fault_dsisr;
491 ctx->pending_fault = false;
492 } else if (ctx->pending_afu_err) {
496 event.afu_error.error = ctx->afu_err;
497 ctx->pending_afu_err = false;
498 } else if (ctx->status == CLOSED) {
500 spin_unlock_irqrestore(&ctx->lock, flags);
505 spin_unlock_irqrestore(&ctx->lock, flags);
508 return afu_driver_event_copy(ctx, buf, &event, pl);
515 finish_wait(&ctx->wq, &wait);
516 spin_unlock_irqrestore(&ctx->lock, flags);
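
Read together, the ctx_event_pending and afu_poll fragments above (source lines 351-382) describe the poll path: the caller is queued on ctx->wq via poll_wait(), and the pending-event check is then made under ctx->lock so it cannot race with the handlers that set the pending flags. Below is a minimal sketch of that path reconstructed from the listed fragments; the return type and the mask bits (EPOLLIN | EPOLLRDNORM for a pending event, EPOLLERR once a CLOSED context has nothing left to deliver) are filled in as assumptions from standard poll semantics and are not part of the listing.

static inline bool ctx_event_pending(struct cxl_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
		return true;

	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
		return true;

	return false;
}

static __poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx_event_pending(ctx))
		mask |= EPOLLIN | EPOLLRDNORM;	/* assumed: readable while an event is queued */
	else if (ctx->status == CLOSED)
		mask |= EPOLLERR;		/* assumed: error only once no events remain */
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}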
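
The afu_read fragments (source lines 425-516) also show the priority applied when a queued event is turned into the structure copied to userspace: AFU-driver events first, then pending interrupts, then translation faults, then AFU errors, with a CLOSED context ending the read. The condensed sketch below shows that selection as it would sit inside afu_read() under ctx->lock, once the prepare_to_wait()/finish_wait() loop has decided there is something to return; the pl declaration, the event-type constants, and the header initialisation are assumptions added for illustration and do not appear in the listing.

	struct cxl_event event;
	struct cxl_event_afu_driver_reserved *pl = NULL;	/* assumed declaration */

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);	/* assumed */

	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
		pl = ctx->afu_driver_ops->fetch_event(ctx);
		atomic_dec(&ctx->afu_driver_events);
		event.header.type = CXL_EVENT_AFU_DRIVER;	/* assumed constant */
	} else if (ctx->pending_irq) {
		event.header.type = CXL_EVENT_AFU_INTERRUPT;	/* assumed constant */
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.type = CXL_EVENT_DATA_STORAGE;	/* assumed constant */
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		event.header.type = CXL_EVENT_AFU_ERROR;	/* assumed constant */
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	}
	/* a CLOSED context with nothing pending unlocks and errors out instead (source lines 498-500) */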