/linux-master/drivers/gpu/drm/i915/gt/selftests/ |
H A D | mock_timeline.c | 19 INIT_LIST_HEAD(&timeline->requests);
|
/linux-master/drivers/gpu/drm/i915/ |
H A D | i915_priolist_types.h | 42 struct list_head requests; member in struct:i915_priolist
|
H A D | i915_scheduler.h | 20 list_for_each_entry(it, &(plist)->requests, sched.link) 23 list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
|
H A D | i915_scheduler_types.h | 45 * at various points to reorder the requests whilst keeping the requests 112 * @lock: protects requests in priority lists, requests, hold and 118 * @requests: list of requests inflight on this schedule engine 120 struct list_head requests; member in struct:i915_sched_engine 123 * @hold: list of ready requests, but on hold 140 * When we add requests into the queue, or adjust the priority of 141 * executing requests, w [all...] |
H A D | i915_scheduler.c | 85 return &p->requests; 102 * requests, so if userspace lied about their 111 INIT_LIST_HEAD(&p->requests); 116 return &p->requests; 190 * end result is a topological list of requests in reverse order, the 467 INIT_LIST_HEAD(&sched_engine->requests);
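The i915_priolist and i915_scheduler entries above all circle one pattern: each priority bucket embeds a requests list head, and requests are threaded onto it through an embedded node, so the iteration macros shown in the i915_scheduler.h snippet can walk a bucket without any extra allocation. Below is a minimal sketch of that pattern under assumed, illustrative demo_* names; it is not the real i915 code.

/* Sketch only: a priority bucket embedding a "requests" list,
 * with requests linked through an embedded node. */
#include <linux/list.h>

struct demo_priolist {
	int priority;
	struct list_head requests;	/* bucket of demo_request.sched_link */
};

struct demo_request {
	struct list_head sched_link;	/* entry on demo_priolist.requests */
};

/* analogous in spirit to the first macro in the i915_scheduler.h snippet */
#define demo_priolist_for_each_request(it, plist) \
	list_for_each_entry(it, &(plist)->requests, sched_link)

static void demo_priolist_init(struct demo_priolist *p, int prio)
{
	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);
}

A _safe variant (as the second macro in the i915_scheduler.h snippet shows) would use list_for_each_entry_safe so entries can be unlinked while the walk is in progress.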
|
/linux-master/arch/powerpc/kvm/ |
H A D | trace.h | 106 __field( __u32, requests ) 111 __entry->requests = vcpu->requests; 114 TP_printk("vcpu=%x requests=%x", 115 __entry->cpu_nr, __entry->requests)
|
/linux-master/drivers/gpu/drm/i915/gt/ |
H A D | intel_gt_requests.c | 21 list_for_each_entry_safe(rq, rn, &tl->requests, link) 31 return !list_empty(&engine->kernel_context->timeline->requests); 208 container_of(work, typeof(*gt), requests.retire_work.work); 210 queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work, 217 INIT_DELAYED_WORK(>->requests.retire_work, retire_work_handler); 222 cancel_delayed_work(>->requests.retire_work); 227 queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work, 234 cancel_delayed_work_sync(>->requests.retire_work);
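The intel_gt_requests.c lines describe a self-rearming retirement worker: the handler recovers its container with container_of(), retires what it can, and re-queues itself, while setup and teardown pair INIT_DELAYED_WORK with cancel_delayed_work_sync. The following is a simplified sketch of that shape with illustrative demo_* names, using schedule_delayed_work() instead of the driver's dedicated workqueue.

#include <linux/workqueue.h>

struct demo_gt {
	struct {
		struct delayed_work retire_work;
	} requests;
};

static void demo_retire_handler(struct work_struct *work)
{
	struct demo_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	/* ... retire completed requests here ... */

	/* re-arm so retirement keeps running in the background */
	schedule_delayed_work(&gt->requests.retire_work, HZ);
}

static void demo_retire_start(struct demo_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, demo_retire_handler);
	schedule_delayed_work(&gt->requests.retire_work, HZ);
}

static void demo_retire_stop(struct demo_gt *gt)
{
	/* _sync waits for a handler still in flight before returning */
	cancel_delayed_work_sync(&gt->requests.retire_work);
}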
|
H A D | intel_timeline_types.h | 25 struct mutex mutex; /* protects the flow of requests */ 29 * How many requests are in flight or may be under construction. 34 * context so that we can issue requests at any time without having 54 * List of breadcrumbs associated with GPU requests currently 57 struct list_head requests; member in struct:intel_timeline
|
H A D | intel_timeline.h | 98 return list_is_last_rcu(&rq->link, &tl->requests);
|
H A D | intel_ring.c | 203 GEM_BUG_ON(list_empty(&tl->requests)); 204 list_for_each_entry(target, &tl->requests, link) { 214 if (GEM_WARN_ON(&target->link == &tl->requests))
|
H A D | intel_timeline.c | 106 INIT_LIST_HEAD(&timeline->requests); 226 /* Must be pinned to be writable, and no requests in flight. */ 397 GEM_BUG_ON(!list_empty(&timeline->requests)); 441 list_for_each_entry_safe(rq, rn, &tl->requests, link) { 466 list_for_each_entry_safe(rq, rn, &tl->requests, link)
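The intel_ring.c and intel_timeline.c entries walk a timeline's requests list in submission order, switching to the _safe iterator where completed entries get unlinked along the way. A rough sketch of that retire walk, with assumed demo_* types rather than the real i915_request:

#include <linux/list.h>
#include <linux/types.h>

struct demo_request {
	struct list_head link;		/* entry on demo_timeline.requests */
	bool completed;
};

struct demo_timeline {
	struct list_head requests;	/* oldest request first */
};

static void demo_timeline_retire(struct demo_timeline *tl)
{
	struct demo_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link) {
		if (!rq->completed)
			break;		/* requests complete in order */
		list_del(&rq->link);
		/* ... drop the reference to rq ... */
	}
}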
|
/linux-master/drivers/gpu/drm/i915/gt/uc/ |
H A D | intel_guc_ct.c | 78 * 2 dwords which gives us a maximum 256 queue'd requests. Hopefully this 122 * destruction requests as they seem to be the slowest operation. 144 spin_lock_init(&ct->requests.lock); 145 INIT_LIST_HEAD(&ct->requests.pending); 146 INIT_LIST_HEAD(&ct->requests.incoming); 150 INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func); 409 unsigned int lost = fence % ARRAY_SIZE(ct->requests.lost_and_found); 417 ct->requests.lost_and_found[lost].stack = stack_depot_save(entries, n, GFP_NOWAIT); 419 ct->requests.lost_and_found[lost].fence = fence; 420 ct->requests [all...] |
H A D | intel_guc_ct.h | 60 * for the H2G and G2H requests sent and received through the buffers. 80 spinlock_t lock; /* protects pending requests list */ 81 struct list_head pending; /* requests waiting for response */ 83 struct list_head incoming; /* incoming requests */ 84 struct work_struct worker; /* handler for incoming requests */ 95 } requests; member in struct:intel_guc_ct
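Taken together, the intel_guc_ct.c/.h lines describe a small request-tracking block: a spinlock guarding two lists (requests already sent and awaiting a reply, and incoming requests queued for a worker), plus the work item that drains the incoming list. A stripped-down sketch of that layout follows, with demo_* placeholders rather than the real GuC CT types; the lost_and_found[] ring visible in the intel_guc_ct.c snippet is omitted.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_ct {
	struct {
		spinlock_t lock;		/* protects the lists below */
		struct list_head pending;	/* sent, waiting for a response */
		struct list_head incoming;	/* received, waiting for the worker */
		struct work_struct worker;	/* drains the incoming list */
	} requests;
};

static void demo_incoming_worker(struct work_struct *w)
{
	struct demo_ct *ct = container_of(w, struct demo_ct, requests.worker);

	/* ... pop entries from ct->requests.incoming under the lock ... */
}

static void demo_ct_init(struct demo_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, demo_incoming_worker);
}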
|
/linux-master/drivers/gpu/drm/i915/selftests/ |
H A D | i915_mock_selftests.h | 27 selftest(requests, i915_request_mock_selftests)
|
H A D | i915_live_selftests.h | 29 selftest(requests, i915_request_live_selftests)
|
/linux-master/drivers/md/dm-vdo/indexer/ |
H A D | index-session.h | 23 * will not accept any further requests and can only be closed. Closing the index will clear the 28 /* Post requests that found an entry */ 30 /* Post requests found in the open chapter */ 32 /* Post requests found in the dense index */ 34 /* Post requests found in the sparse index */ 36 /* Post requests that did not find an entry */ 38 /* Update requests that found an entry */ 40 /* Update requests that did not find an entry */ 42 /* Delete requests that found an entry */ 44 /* Delete requests tha 51 u64 requests; variable [all...] |
|
/linux-master/arch/powerpc/perf/
H A D | hv-24x7.h | 65 struct hv_24x7_request requests[]; member in struct:hv_24x7_request_buffer 116 * 0 = not all result elements fit into the buffer, additional requests
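The hv_24x7_request_buffer entry ends in a flexible array member, the usual C layout for a fixed header followed by a variable number of request slots. Below is a hedged sketch of how such a buffer is commonly sized and allocated; the demo_* names are invented and this is not the 24x7 driver's own allocation path.

#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct demo_request {
	u32 counter;
};

struct demo_request_buffer {
	u16 num_requests;
	struct demo_request requests[];	/* flexible array member */
};

static struct demo_request_buffer *demo_alloc(u16 n)
{
	struct demo_request_buffer *buf;

	/* struct_size() guards the header + n elements computation */
	buf = kzalloc(struct_size(buf, requests, n), GFP_KERNEL);
	if (buf)
		buf->num_requests = n;
	return buf;
}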
|
/linux-master/drivers/gpu/drm/i915/gem/ |
H A D | i915_gem_throttle.c | 26 * Throttle our rendering by waiting until the ring has completed our requests 71 &ce->timeline->requests,
|
H A D | i915_gem_execbuffer.c | 260 /** our requests to build */ 261 struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; member in struct:i915_execbuffer 1974 * Using two helper loops for the order of which requests / batches are created 1994 if (eb->requests[i]) 1995 return eb->requests[i]; 2044 struct i915_request *rq = eb->requests[j]; 2148 if (!eb->requests[j]) 2151 err = _i915_vma_move_to_active(vma, eb->requests[j], 2155 &eb->requests[j]->fence, 2188 if (!eb->requests[ [all...] |
|
/linux-master/net/handshake/
H A D | netlink.c | 199 LIST_HEAD(requests); 208 list_splice_init(&requests, &hn->hn_requests); 211 while (!list_empty(&requests)) { 212 req = list_first_entry(&requests, struct handshake_req, hr_list);
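The handshake/netlink.c lines use a common drain idiom: splice a shared list onto a local list head in one step, then process the detached entries without holding the lock for the whole walk. A generic sketch of that idiom is below; it is simplified, and the locking and splice direction in netlink.c itself may differ.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head hr_list;
};

static void demo_drain(struct list_head *shared, spinlock_t *lock)
{
	LIST_HEAD(requests);			/* local, initially empty */
	struct demo_req *req;

	spin_lock(lock);
	list_splice_init(shared, &requests);	/* detach everything at once */
	spin_unlock(lock);

	while (!list_empty(&requests)) {
		req = list_first_entry(&requests, struct demo_req, hr_list);
		list_del(&req->hr_list);
		/* ... complete or cancel req ... */
	}
}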
|
/linux-master/drivers/media/v4l2-core/ |
H A D | v4l2-ctrls-request.c | 21 INIT_LIST_HEAD(&hdl->requests); 39 if (hdl->req_obj.ops || list_empty(&hdl->requests)) 44 * outstanding requests, then unbind and put those objects before 47 list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { 102 list_del_init(&hdl->requests); 163 list_add_tail(&hdl->requests, &from->requests);
|
/linux-master/drivers/base/ |
H A D | devtmpfs.c | 9 * device which requests a device node, will add a node in this 55 } *requests; variable in typeref:struct:req 102 req->next = requests; 103 requests = req; 393 while (requests) { 394 struct req *req = requests; 395 requests = NULL;
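The devtmpfs entry tracks pending node-creation requests with a plain singly linked chain rather than a list_head: producers push onto a global head, and the consumer takes the whole chain before walking it. A hedged sketch of that shape with illustrative names; the real code also serializes these steps with a lock, omitted here.

#include <linux/stddef.h>

struct demo_req {
	struct demo_req *next;
	/* ... name, mode, device number of the node to create ... */
};

static struct demo_req *demo_requests;	/* head of the pending chain */

static void demo_submit(struct demo_req *req)
{
	/* producer: push onto the head (real code holds a lock here) */
	req->next = demo_requests;
	demo_requests = req;
}

static void demo_handle_all(void)
{
	/* consumer: take the whole chain, then walk it */
	while (demo_requests) {
		struct demo_req *req = demo_requests;

		demo_requests = NULL;
		while (req) {
			struct demo_req *next = req->next;

			/* ... create the device node described by req ... */
			req = next;
		}
	}
}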
|
/linux-master/drivers/media/pci/tw686x/ |
H A D | tw686x.h | 172 void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests, 178 void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
|
/linux-master/drivers/iio/adc/ |
H A D | twl4030-madc.c | 158 * @requests: Array of request struct corresponding to SW1, SW2 and RT 167 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; member in struct:twl4030_madc_data 439 * corresponding to RT, SW1, SW2 conversion requests. 492 madc->requests[i].result_pending = true; 495 r = &madc->requests[i]; 517 r = &madc->requests[i]; 618 if (twl4030_madc->requests[req->method].active) { 649 twl4030_madc->requests[req->method].active = true; 653 twl4030_madc->requests[req->method].active = false; 658 twl4030_madc->requests[re [all...] |
|
/linux-master/drivers/vdpa/vdpa_sim/
H A D | vdpa_sim_net.c | 51 u64 requests; member in struct:vdpasim_cq_stats 134 u64 requests = 0, errors = 0, successes = 0; local 150 ++requests; 190 net->cq_stats.requests += requests; 377 cq_requests = net->cq_stats.requests; 383 "cvq requests")) 489 * connect the device to the vDPA bus, so requests can arrive after
|