Lines matching refs: ce

124 struct intel_context *ce;
127 ce = intel_context_create(engine);
128 if (IS_ERR(ce)) {
129 err = PTR_ERR(ce);
133 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
155 intel_context_put(ce);
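
Note: lines 124-155 show the basic lifecycle every test here follows: create a per-engine context, submit a spinning request on it, drop the reference. A minimal sketch of that pattern, assuming engine and an initialised spin (igt_spinner_init) come from the surrounding test:

	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!igt_wait_for_spinner(&spin, rq))
		err = -ETIME; /* spinner never started on the hardware */

	igt_spinner_end(&spin);
	i915_request_put(rq);
out_ce:
	intel_context_put(ce); /* drops the reference taken by create */
	return err;
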
181 struct intel_context *ce[2] = {};
198 for (n = 0; n < ARRAY_SIZE(ce); n++) {
215 * lite-restore using the RING_TAIL from ce[1] it
216 * will execute garbage from ce[0]->ring.
222 ce[n] = tmp;
224 GEM_BUG_ON(!ce[1]->ring->size);
225 intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
226 lrc_update_regs(ce[1], engine, ce[1]->ring->head);
228 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
236 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
243 rq[1] = i915_request_create(ce[1]);
252 * Ensure we do the switch to ce[1] on completion.
274 /* Alternatively preempt the spinner with ce[1] */
278 /* And switch back to ce[0] for good measure */
279 rq[0] = i915_request_create(ce[0]);
296 for (n = 0; n < ARRAY_SIZE(ce); n++) {
297 if (IS_ERR_OR_NULL(ce[n]))
300 intel_context_unpin(ce[n]);
301 intel_context_put(ce[n]);
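
Note: lines 181-301 are the lite-restore check. ce[1]'s ring is reset to its half-way point before submission so that, if the hardware performed a lite-restore using ce[1]'s RING_TAIL while ce[0]'s context was active, it would execute garbage out of ce[0]->ring. A sketch of just the setup step (lrc_update_regs() is an internal helper, not public API; the pin/submit code is elided):

	/* Move ce[1]'s ring to mid-buffer so its RING_TAIL differs
	 * visibly from ce[0]'s, then rewrite the context image to
	 * match the new head. */
	GEM_BUG_ON(!ce[1]->ring->size);
	intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
	lrc_update_regs(ce[1], engine, ce[1]->ring->head);

	/* The spinner emitted on ce[0] must end below ce[1]'s emit
	 * point for the overlap to be meaningful. */
	GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
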
343 struct intel_context *ce[2] = {};
360 for (n = 0; n < ARRAY_SIZE(ce); n++) {
379 ce[n] = tmp;
383 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
402 while (intel_ring_direction(ce[0]->ring,
404 ce[0]->ring->tail) <= 0) {
407 tmp = intel_context_create_request(ce[0]);
421 ce[0]->ring->size,
422 ce[0]->ring->tail,
423 ce[0]->ring->emit,
425 GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
427 ce[0]->ring->tail) <= 0);
431 rq = intel_context_create_request(ce[1]);
451 ce[0]->ring->tail, ce[0]->ring->emit,
452 ce[1]->ring->tail, ce[1]->ring->emit);
457 for (n = 0; n < ARRAY_SIZE(ce); n++) {
458 if (IS_ERR_OR_NULL(ce[n]))
461 intel_context_unpin(ce[n]);
462 intel_context_put(ce[n]);
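
Note: lines 343-462 fill ce[0]'s ring until the tail wraps, using intel_ring_direction() as the wrap test, then submit on ce[1]. The middle argument of the direction check is not part of this match; assuming it is the spinner's wa_tail (as in the byte-counting variant at line 2852), the fill loop looks like:

	/* Queue no-op requests on ce[0] until the ring tail has wrapped
	 * past the spinner, i.e. the direction check changes sign. */
	while (intel_ring_direction(ce[0]->ring,
				    rq->wa_tail,
				    ce[0]->ring->tail) <= 0) {
		struct i915_request *tmp;

		tmp = intel_context_create_request(ce[0]);
		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			goto err_ce; /* the cleanup loop at lines 457-462 */
		}

		i915_request_add(tmp);
	}
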
492 struct intel_context *ce;
502 ce = intel_context_create(engine);
503 if (IS_ERR(ce)) {
504 err = PTR_ERR(ce);
508 err = intel_context_pin(ce);
510 intel_context_put(ce);
515 err = i915_active_acquire(&ce->active);
517 intel_context_unpin(ce);
518 intel_context_put(ce);
521 ring = ce->ring;
529 intel_context_unpin(ce);
532 GEM_BUG_ON(intel_context_is_pinned(ce));
533 rq = intel_context_create_request(ce);
534 i915_active_release(&ce->active); /* e.g. async retire */
535 intel_context_put(ce);
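
Note: lines 492-535 are the unpinned-request case: the context is unpinned before the request is created, but ce->active is held so the context state cannot be retired underneath the test (the comment at line 534 marks where the release stands in for an asynchronous retire). Condensed ordering:

	err = intel_context_pin(ce);
	if (err) {
		intel_context_put(ce);
		return err;
	}

	/* Take an activity reference, then drop the pin: the context
	 * must remain usable for request creation while unpinned. */
	err = i915_active_acquire(&ce->active);
	if (err) {
		intel_context_unpin(ce);
		intel_context_put(ce);
		return err;
	}

	intel_context_unpin(ce);
	GEM_BUG_ON(intel_context_is_pinned(ce));

	rq = intel_context_create_request(ce); /* must repin internally */
	i915_active_release(&ce->active); /* e.g. async retire */
	intel_context_put(ce);
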
600 struct intel_context *ce;
603 ce = intel_context_create(engine);
604 if (IS_ERR(ce)) {
605 err = PTR_ERR(ce);
611 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
664 intel_context_put(ce);
719 struct intel_context *ce;
722 ce = intel_context_create(engine);
723 if (IS_ERR(ce)) {
724 err = PTR_ERR(ce);
728 rq = intel_context_create_request(ce);
729 intel_context_put(ce);
859 struct intel_context *ce;
863 ce = intel_context_create(engine);
864 if (IS_ERR(ce))
865 return ERR_CAST(ce);
867 rq = intel_context_create_request(ce);
883 intel_context_put(ce);
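
Note: lines 859-883 sit inside a helper that returns a struct i915_request *, so a failed context creation is propagated with ERR_CAST() instead of PTR_ERR(). A sketch of that shape; the helper name create_request() and the empty middle are assumptions, only the create/ERR_CAST/put skeleton comes from the listing:

	/* Hypothetical wrapper illustrating the ERR_CAST pattern. */
	static struct i915_request *create_request(struct intel_engine_cs *engine)
	{
		struct intel_context *ce;
		struct i915_request *rq;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return ERR_CAST(ce); /* re-type the ERR_PTR */

		rq = intel_context_create_request(ce);

		/* A created request holds its own context reference,
		 * so the local one can be dropped unconditionally. */
		intel_context_put(ce);
		return rq;
	}
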
1049 create_rewinder(struct intel_context *ce,
1054 i915_ggtt_offset(ce->engine->status_page.vma) +
1060 rq = intel_context_create_request(ce);
1131 struct intel_context *ce;
1155 ce = intel_context_create(engine);
1156 if (IS_ERR(ce)) {
1157 err = PTR_ERR(ce);
1161 rq[A1] = create_rewinder(ce, NULL, slot, X);
1163 intel_context_put(ce);
1167 rq[A2] = create_rewinder(ce, NULL, slot, Y);
1168 intel_context_put(ce);
1179 ce = intel_context_create(engine);
1180 if (IS_ERR(ce)) {
1181 err = PTR_ERR(ce);
1185 rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
1186 intel_context_put(ce);
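
Note: lines 1049-1186 build the timeslice-rewind scenario. create_rewinder() emits a request that polls a semaphore in the engine's status page (hence i915_ggtt_offset() on status_page.vma) before writing a marker (X, Y or Z) into a slot; two requests go to one context, a dependent third to a second context. The caller side, as recoverable from the match:

	/* Two requests on context A (markers X then Y); rq[B1] on a
	 * fresh context depends on rq[A1] and writes Z. */
	ce = intel_context_create(engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto err;
	}

	rq[A1] = create_rewinder(ce, NULL, slot, X);
	if (IS_ERR(rq[A1])) {
		intel_context_put(ce);
		goto err;
	}

	rq[A2] = create_rewinder(ce, NULL, slot, Y);
	intel_context_put(ce); /* both A requests now hold the context */
	if (IS_ERR(rq[A2]))
		goto err;
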
1429 struct intel_context *ce;
1436 ce = intel_context_create(engine);
1437 if (IS_ERR(ce)) {
1438 err = PTR_ERR(ce);
1447 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
1448 intel_context_put(ce);
1468 ce = intel_context_create(engine);
1469 if (IS_ERR(ce)) {
1470 err = PTR_ERR(ce);
1474 rq = intel_context_create_request(ce);
1475 intel_context_put(ce);
1720 struct intel_context *ce;
1723 ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
1724 if (IS_ERR(ce))
1725 return ERR_CAST(ce);
1727 rq = igt_spinner_create_request(spin, ce, arb);
1728 intel_context_put(ce);
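
Note: lines 1720-1728 take the context from an existing struct i915_gem_context instead of creating one: i915_gem_context_get_engine() returns a referenced intel_context for the engine's legacy (ring) slot. Sketch, assuming ctx, spin and arb are supplied by the caller:

	struct intel_context *ce;
	struct i915_request *rq;

	/* Look up the gem context's engine slot; this takes its own
	 * reference on the returned intel_context. */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = igt_spinner_create_request(spin, ce, arb);
	intel_context_put(ce);
	return rq;
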
2705 struct intel_context *ce;
2711 ce = intel_context_create(engine);
2712 if (IS_ERR(ce))
2713 return PTR_ERR(ce);
2721 vma = i915_vma_instance(obj, ce->vm, NULL);
2761 rq = intel_context_create_request(ce);
2780 intel_context_put(ce);
2792 intel_context_put(ce);
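
Note: lines 2705-2792 bind a batch object into the context's own address space: i915_vma_instance() is resolved against ce->vm, so the batch address is valid in this context's ppGTT rather than the GGTT. Sketch of the binding step; the PIN_USER flag and the cleanup labels are assumptions:

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_ce;
	}

	/* Bind into the context's ppGTT before building the request. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_ce;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}
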
2800 struct intel_context *ce[2] = {};
2809 for (n = 0; n < ARRAY_SIZE(ce); n++) {
2830 ce[n] = tmp;
2833 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
2852 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
2855 tmp = intel_context_create_request(ce[0]);
2869 ce[0]->ring->size,
2870 ce[0]->ring->tail,
2871 ce[0]->ring->emit,
2876 rq = intel_context_create_request(ce[1]);
2896 ce[0]->ring->tail, ce[0]->ring->emit,
2897 ce[1]->ring->tail, ce[1]->ring->emit);
2902 for (n = 0; n < ARRAY_SIZE(ce); n++) {
2903 if (IS_ERR_OR_NULL(ce[n]))
2906 intel_context_unpin(ce[n]);
2907 intel_context_put(ce[n]);
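
Note: lines 2800-2907 repeat the two-context ring-fill of lines 343-462 with the wrap condition counted in bytes: queue on ce[0] until the tail is more than queue_sz past the spinner's wa_tail, then submit the preempting request on ce[1]. Fill loop as recoverable from the match, assuming queue_sz is computed by the caller:

	/* Push ce[0]'s tail far enough past the spinner that the
	 * preemption via ce[1] lands in a wrapped ring. */
	while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
		struct i915_request *tmp;

		tmp = intel_context_create_request(ce[0]);
		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			goto err_ce; /* cleanup loop at lines 2902-2907 */
		}

		i915_request_add(tmp);
	}

	rq = intel_context_create_request(ce[1]);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_ce;
	}
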
3152 struct intel_context *ce;
3156 ce = intel_context_create(engine);
3157 if (IS_ERR(ce))
3158 return ERR_CAST(ce);
3160 vma = i915_vma_instance(global->obj, ce->vm, NULL);
3176 rq = intel_context_create_request(ce);
3203 intel_context_put(ce);
4026 struct intel_context *ce;
4040 ce = intel_context_create(siblings[n]);
4041 if (IS_ERR(ce)) {
4042 err = PTR_ERR(ce);
4046 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4047 intel_context_put(ce);
4056 ce = intel_engine_create_virtual(siblings, nsibling, 0);
4057 if (IS_ERR(ce)) {
4058 err = PTR_ERR(ce);
4062 rq = intel_context_create_request(ce);
4063 intel_context_put(ce);
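
Note: lines 4026-4063 start a spinner on every physical sibling, then submit one request through a virtual engine spanning them all. Sketch of the virtual half, assuming the sibling spinners are already queued and out is the test's unwind label:

	/* Create a virtual engine over all siblings (flags == 0, as in
	 * the listing) and submit a single request through it. */
	ce = intel_engine_create_virtual(siblings, nsibling, 0);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out;
	}

	rq = intel_context_create_request(ce);
	intel_context_put(ce); /* the request keeps the context alive */
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_add(rq);
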
4093 struct intel_context *ce;
4108 ce = intel_engine_create_virtual(siblings, nsibling, 0);
4109 if (IS_ERR(ce)) {
4110 err = PTR_ERR(ce);
4114 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4115 intel_context_put(ce);
4125 ce = intel_context_create(siblings[n]);
4126 if (IS_ERR(ce)) {
4127 err = PTR_ERR(ce);
4131 rq = intel_context_create_request(ce);
4132 intel_context_put(ce);