Searched refs:requests (Results 1 - 25 of 92) sorted by path

/linux-master/arch/powerpc/kvm/
trace.h 106 __field( __u32, requests )
111 __entry->requests = vcpu->requests;
114 TP_printk("vcpu=%x requests=%x",
115 __entry->cpu_nr, __entry->requests)
/linux-master/arch/powerpc/perf/
hv-24x7.h 65 struct hv_24x7_request requests[]; member in struct:hv_24x7_request_buffer
116 * 0 = not all result elements fit into the buffer, additional requests
hv-24x7.c 158 * - limited number of requests per hcall (must fit into 4K bytes)
160 * - 255 requests per hcall
1203 req = request_buffer->requests;
1232 pr_devel("Too many requests for 24x7 HCALL %d\n",
1251 req = (void *) request_buffer->requests + i * req_size;
1580 * For READ transactions, submit all pending 24x7 requests (i.e requests
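The hv-24x7.c hits above walk a flexible array of request slots whose per-element size is given at run time (the hit at hv-24x7.c:1251 indexes by req_size rather than by array subscript). Below is a minimal sketch of that stride-based indexing pattern, using illustrative demo_* types rather than the real hv_24x7 definitions; the void-pointer arithmetic is GNU C, as in the kernel.

#include <stddef.h>

/* Illustrative stand-in for a request buffer: a small header followed by
 * a flexible array of requests, as in the hv-24x7.h:65 hit. */
struct demo_request {
	unsigned char data[16];
};

struct demo_request_buffer {
	unsigned char interface_version;
	unsigned char num_requests;
	struct demo_request requests[];
};

/* Return request i when each slot occupies req_size bytes; req_size may
 * be larger than sizeof(struct demo_request) for newer interface
 * versions, so index by byte offset instead of requests[i]. */
static struct demo_request *demo_request_at(struct demo_request_buffer *buf,
					    int i, size_t req_size)
{
	return (void *)buf->requests + i * req_size;
}
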
/linux-master/arch/sparc/lib/
M7memcpy.S 451 ! to allow multiple requests to not be blocked by overflowing the
/linux-master/arch/arm/mach-tegra/
sleep-tegra20.S 354 str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests
sleep-tegra30.S 848 str r1, [r0, #EMC_REQ_CTRL] @ stall incoming DRAM requests
/linux-master/arch/mips/kernel/
entry.S 139 # notify-resume requests
/linux-master/block/
bfq-iosched.c 37 * processes issuing sequential requests (to boost the throughput),
49 * the I/O requests in a bfq_queue come from an interactive or a soft
164 /* Expiration time of async (0) and sync (1) requests, in ns. */
179 /* Default maximum budget values, in sectors and number of requests. */
215 * first requests from each cooperator. After that, there is very
225 /* hw_tag detection: parallel requests threshold and min samples needed. */
471 * Scheduler run of queue, if there are requests pending and no one in the
502 unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
642 * gets more requests than high prio queue from lower
656 "too many requests
[all...]
bfq-iosched.h 80 * requests from lower priority queues; among requests of the same
81 * queue requests are served according to B-WF2Q+.
179 /* Number of requests allocated in the subtree of this entity */
238 * processes. Besides, it contains I/O requests for only one actuator
276 /* sorted list of pending requests */
280 /* number of sync and async requests queued */
282 /* number of pending metadata requests */
284 /* fifo list of requests in sort_list */
298 /* number of requests o
504 unsigned int requests; /* Number of requests this process has in flight */ member in struct:bfq_io_cq
[all...]
/linux-master/drivers/acpi/acpica/
dbstats.c 86 "%8.2X %8.2X %8.2X %8.2X\n", list->requests, list->hits,
87 list->requests - list->hits, list->object_size);
utcache.c 224 ACPI_MEM_TRACKING(cache->requests++);
/linux-master/drivers/base/
devtmpfs.c 9 * device which requests a device node, will add a node in this
55 } *requests; variable in typeref:struct:req
102 req->next = requests;
103 requests = req;
393 while (requests) {
394 struct req *req = requests;
395 requests = NULL;
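The devtmpfs.c hits show a producer/worker handoff: callers prepend to a singly linked requests list (devtmpfs.c:102-103) and the worker detaches the whole list before handling it (devtmpfs.c:393-395). A stripped-down sketch of that pattern follows, with the locking the real driver uses omitted; names are illustrative.

struct req {
	struct req *next;
	/* payload fields omitted */
};

static struct req *requests;	/* head of the pending list */

/* Producer side: push a new request onto the head of the list. */
static void submit_req(struct req *req)
{
	req->next = requests;
	requests = req;
}

/* Worker side: detach the whole list, then handle entries one by one.
 * Submissions arriving meanwhile start a fresh list and are picked up
 * by the next pass of the outer loop. */
static void drain_reqs(void (*handle)(struct req *))
{
	while (requests) {
		struct req *req = requests;

		requests = NULL;
		while (req) {
			struct req *next = req->next;

			handle(req);
			req = next;
		}
	}
}

Detaching the list first lets the entries be handled without blocking new submissions for the whole drain.
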
/linux-master/drivers/block/
xen-blkfront.c 112 * Id of the sibling if we ever need 2 requests when handling a
133 * Maximum number of segments in indirect requests, the actual value used by
141 "Maximum amount of segments in indirect requests (default is 32)");
231 struct list_head requests; member in struct:blkfront_info
651 * previous requests. This is OK as long as
734 /* Check if we have enough persistent grants to allocate a requests */
815 /* Link the 2 requests together */
1244 * Clear persistent grants present in requests already
1304 /* Prevent new requests being issued until we fix things up. */
2032 /* Kick any other new requests queue
[all...]
/linux-master/drivers/crypto/inside-secure/
safexcel.c 809 int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
857 * dequeue other requests as this is valid and not an error.
868 /* Not enough resources to handle all the requests. Bail out and save
880 priv->ring[ring].requests += nreq;
1056 /* If the number of requests overflowed the counter, try to proceed more
1057 * requests.
1065 priv->ring[ring].requests -= handled;
1068 if (!priv->ring[ring].requests)
1675 priv->ring[i].requests = 0;
safexcel.h 58 /* Custom on-stack requests (for invalidation) */
711 /* Number of requests in the engine. */
712 int requests; member in struct:safexcel_ring
717 /* Store for current requests when bailing out of the dequeueing
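The safexcel hits revolve around a per-ring in-flight counter: it grows as requests are queued (safexcel.c:880), caps how many results one pass will coalesce (safexcel.c:809), and shrinks as results are handled (safexcel.c:1065). A minimal sketch of that bookkeeping, using stand-in names and a stand-in batch limit instead of the driver's structures and EIP197_MAX_BATCH_SZ:

#define DEMO_MAX_BATCH_SZ 64	/* stand-in for the engine's batch limit */

struct demo_ring {
	int requests;		/* requests currently in flight on this ring */
};

/* Submission path: account for the newly queued requests. */
static void demo_queue(struct demo_ring *ring, int nreq)
{
	ring->requests += nreq;
}

/* Completion path: derive the next coalescing threshold from the current
 * in-flight count, then subtract the results just handled. */
static int demo_complete(struct demo_ring *ring, int handled)
{
	int coal = ring->requests < DEMO_MAX_BATCH_SZ ?
		   ring->requests : DEMO_MAX_BATCH_SZ;

	ring->requests -= handled;
	return coal;
}
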
/linux-master/drivers/gpu/drm/amd/amdkfd/
cwsr_trap_handler_gfx10.asm 1225 // Otherwise retain PRIV=1 for subsequent context save requests.
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_context.c 219 * hangcheck to ensure that the persistent requests are healthy.
1357 * (and onto a new timeline->requests list).
1360 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1391 * However, we only care about pending requests, so only include
1392 * engines on which there are incomplete requests.
1541 * case we opt to forcibly kill off all remaining requests on
1558 * hangcheck to ensure that the persistent requests are healthy.
i915_gem_execbuffer.c 260 /** our requests to build */
261 struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; member in struct:i915_execbuffer
1974 * Using two helper loops for the order of which requests / batches are created
1994 if (eb->requests[i])
1995 return eb->requests[i];
2044 struct i915_request *rq = eb->requests[j];
2148 if (!eb->requests[j])
2151 err = _i915_vma_move_to_active(vma, eb->requests[j],
2155 &eb->requests[j]->fence,
2188 if (!eb->requests[
[all...]
i915_gem_throttle.c 26 * Throttle our rendering by waiting until the ring has completed our requests
71 &ce->timeline->requests,
/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_mman.c 634 cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
/linux-master/drivers/gpu/drm/i915/gt/
intel_context.c 415 INIT_LIST_HEAD(&ce->guc_state.requests);
555 * context. The parent list contains the requests for all the contexts
560 list_for_each_entry_reverse(rq, &parent->guc_state.requests,
intel_context_types.h 103 * @signal_lock protects the list of requests that need signaling,
104 * @signals. While there are any requests that need signaling,
110 spinlock_t signal_lock; /* protects signals, the list of requests */
197 * @fences: maintains a list of requests that are currently
206 /** @requests: list of active requests on this context */
207 struct list_head requests; member in struct:intel_context::__anon641
211 * @prio_count: a counter of the number requests in flight in
267 * to insert submit fences between requests in the parallel
intel_engine.h 248 void intel_engine_dump_active_requests(struct list_head *requests,
intel_engine_cs.c 712 /* Free the requests! dma-resv keeps fences around for an eternity */
1570 GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1782 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
1919 * Return true if there are no requests pending, nothing left to be submitted
1935 /* ELSP is empty, but there are ready requests? E.g. after reset */
2345 void intel_engine_dump_active_requests(struct list_head *requests, argument
2353 list_for_each_entry(rq, requests, sched.link) {
2378 * The GPU is still running so requests are still executing and any
2520 list_for_each_entry_from_reverse(request, &tl->requests, link) {
2531 list_for_each_entry(request, &engine->sched_engine->requests,
[all...]
intel_execlists_submission.c 90 * After processing, if any requests were retired and the queue is not empty
91 * then a new execution list can be submitted. The two requests at the front of
93 * an execution list, if subsequent requests have the same ID as the first then
94 * the two requests must be combined. This is done simply by discarding requests
95 * at the head of the queue until either only one requests is left (in which case
96 * we use a NULL second context) or the first two requests have unique IDs.
98 * By always executing the first two requests in the queue the driver ensures
218 list_for_each_entry_from_reverse(rq, &tl->requests, link) {
270 * will not matter (i.e. all requests t
[all...]
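The intel_execlists_submission.c comment quoted above describes how the two submission ports are filled: take the first two queued requests, and if they share a context ID keep discarding the older one until the front two differ or only one request remains (in which case the second port is left NULL). A toy sketch of that pairing rule, using illustrative types rather than the i915 ones:

struct demo_request {
	unsigned int ctx_id;		/* submission context identifier */
	struct demo_request *next;	/* next request in the queue */
};

/* Fill the two submission ports from the head of the queue. Consecutive
 * head requests from the same context collapse into one port, since
 * completing the newer request implies the older ones completed too. */
static void demo_fill_ports(struct demo_request *queue,
			    struct demo_request *port[2])
{
	while (queue && queue->next && queue->ctx_id == queue->next->ctx_id)
		queue = queue->next;	/* discard the older duplicate */

	port[0] = queue;
	port[1] = queue ? queue->next : NULL;	/* NULL => single-context submission */
}
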

Completed in 364 milliseconds
