/linux-master/drivers/scsi/lpfc/
lpfc_init.c:
  1258: * state is cleared for the next heart-beat. If the timer expired with the
  1509: * the timer for the next timeout period. If lpfc heart-beat mailbox command
  5190: /* restart the timer for the next iteration */
  5981: * the next timer cycle to hit.
  6000: * timer interrupt, set the start time for the next
  6024: * this next timer interval.
  6061: /* Calculate MBPI for the next timer interval */
  8794: * The next initialization cycle sets the count and allocates
  12529: /* Bump start_cpu to the next slot to minimize the
  12578: /* Bump start_cpu to the next slot ...
  12920: struct lpfc_queue *eq, *next; (local)
  12950: struct lpfc_queue *eq, *next; (local)
  14626: struct lpfc_dmabuf *dmabuf, *next; (local)
  ... (more matches in this file)

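Several of the lpfc_init.c hits revolve around one pattern: a heart-beat timer whose callback re-arms itself for the next interval. A minimal sketch of that self-re-arming idiom, assuming a kernel build context (hb_state, hb_timeout, and the 5-second period are illustrative, not lpfc's actual names):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative heartbeat state; not the lpfc structure. */
struct hb_state {
        struct timer_list timer;
        bool outstanding;       /* previous heart-beat still in flight? */
};

static void hb_timeout(struct timer_list *t)
{
        struct hb_state *hb = from_timer(hb, t, timer);

        if (!hb->outstanding) {
                /* ... issue the next heart-beat command here ... */
                hb->outstanding = true;
        }

        /* Re-arm so the handler runs again next interval (5 s is arbitrary). */
        mod_timer(&hb->timer, jiffies + msecs_to_jiffies(5000));
}
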
lpfc_hbadisc.c:
  943: /* First, try to post the next mailbox command to SLI4 device */
  2204: * @next_fcf_index: pointer to holder of next fcf index.
  2277: * @next_fcf_index: the index to the next fcf record in hba's fcf table.
  2367: * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
  2369: * @fcf_index: index to next fcf.
  2371: * This function processing the roundrobin fcf failover to next fcf index.
  2574: } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
  2632: /* Let next new FCF event trigger fast failover */
  2650: * read the next entry; otherwise, this is an eligible FCF
  2752: * Read next FC ...
  ... (more matches in this file)

lpfc_debugfs.c:
  134: * dumps it to @buf up to @size number of bytes. It will start at the next entry
  200: * dumps it to @buf up to @size number of bytes. It will start at the next entry
  684: /* Set it up for the next time */
  1321: * off to NVME Layer to start of next command.
  1407: "done -to- Start of next Cmd (in driver)\n");
  2193: struct lpfc_dmabuf *dmabuf, *next; (local)
  2204: list_for_each_entry_safe(dmabuf, next,
  3384: /* Set up the offset for next portion of pci cfg read */
  3698: /* Set up the offset for next portion of pci bar read */
  4036: * EQ read and jumps to the next E...
  ... (more matches in this file)

lpfc_bsg.c:
  63: /* next two flags are here for the auto-delete logic */
  588: prsp = (struct lpfc_dmabuf *)pcmd->list.next;
  786: ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
  793: ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
  859: struct list_head head, *curr, *next; (local)
  869: list_for_each_safe(curr, next, &head) {
  2885: struct list_head head, *curr, *next; (local)
  2922: list_for_each_safe(curr, next, &head) {
  2966: dmp = list_entry(next, struct lpfc_dmabuf, list);
  2980: dmp = list_entry(next, struct lpfc_dmabuf, list);
  5261: struct lpfc_dmabuf *dmabuf, *next; (local)
  ... (more matches in this file)

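The lpfc_bsg.c hits at 588, 786, and 793 reach the first element of a list through the raw list.next pointer, which is exactly what list_first_entry() spells out by name. A minimal sketch of popping the oldest entry that way, assuming a kernel build context (event_data and pop_event are illustrative names):

#include <linux/list.h>

struct event_data {             /* illustrative, not lpfc's struct */
        struct list_head node;
        int payload;
};

/* Pop the oldest event; list_first_entry(head, ...) is the named
 * equivalent of the list_entry(head->next, ...) seen in the hits above. */
static struct event_data *pop_event(struct list_head *events)
{
        struct event_data *ed;

        if (list_empty(events))
                return NULL;

        ed = list_first_entry(events, struct event_data, node);
        list_del(&ed->node);
        return ed;
}
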
/linux-master/drivers/scsi/bnx2fc/
bnx2fc_tgt.c:
  618: u32 conn_id, next; (local)
  629: next = hba->next_conn_id;
  639: if (conn_id == next) {

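The bnx2fc_tgt.c hits implement round-robin connection-ID allocation: start scanning at a remembered next hint and give up once the scan wraps back to where it started. A plain-C sketch of that wraparound search (alloc_conn_id, table, and max_ids are illustrative, not the driver's API):

/* Round-robin ID allocation with wraparound, in the spirit of the
 * bnx2fc hits above; table[], max_ids, and the hint are illustrative. */
static int alloc_conn_id(void *table[], unsigned int max_ids,
                         unsigned int *next_hint)
{
        unsigned int start = *next_hint % max_ids;
        unsigned int id = start;

        do {
                if (!table[id]) {
                        /* Remember where to start the next search. */
                        *next_hint = (id + 1) % max_ids;
                        return (int)id;
                }
                id = (id + 1) % max_ids;
        } while (id != start);  /* wrapped around: every slot is taken */

        return -1;
}
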
/linux-master/drivers/md/ |
dm-integrity.c:
  1356: struct rb_node *next; (local)
  1366: next = rb_next(&node->node);
  1367: if (unlikely(!next))
  1370: next_node = container_of(next, struct journal_node, node);
  1376: struct rb_node *next; (local)
  1382: next = rb_next(&node->node);
  1383: if (unlikely(!next))
  1386: next_node = container_of(next, struct journal_node, node);
  2488: struct bio *next = flushes->bi_next; (local)
  2492: flushes = next;
  ... (more matches in this file)

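The dm-integrity.c hits pair rb_next() with container_of() to step from a red-black tree node to its in-order successor. A minimal sketch, assuming a kernel build context (journal_entry is an illustrative node type, not the target's struct journal_node):

#include <linux/rbtree.h>

struct journal_entry {                  /* illustrative node type */
        struct rb_node node;
        unsigned long long sector;
};

/* Step to the in-order successor, or return NULL at the end of the
 * tree; rb_entry() is the container_of() wrapper for rb_node. */
static struct journal_entry *journal_entry_next(struct journal_entry *e)
{
        struct rb_node *next = rb_next(&e->node);

        return next ? rb_entry(next, struct journal_entry, node) : NULL;
}
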
/linux-master/drivers/gpu/drm/xe/ |
xe_exec_queue.c:
  201: struct xe_exec_queue *eq, *next; (local)
  205: list_for_each_entry_safe(eq, next, &q->multi_gt_list,
  782: struct xe_exec_queue *eq = q, *next; (local)
  784: list_for_each_entry_safe(eq, next, &eq->multi_gt_list,

xe_bo.c:
  513: * Note that unmapping the attachment is deferred to the next
  2244: struct xe_bo *bo, *next; (local)
  2253: llist_for_each_entry_safe(bo, next, freed, freed)

/linux-master/drivers/gpu/drm/scheduler/ |
sched_entity.c:
  509: * the timestamp of the next job, if any.
  512: struct drm_sched_job *next; (local)
  514: next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
  515: if (next)
  516: drm_sched_rq_update_fifo(entity, next->submit_ts);

/linux-master/drivers/gpu/drm/qxl/ |
qxl_cmd.c:
  227: next_id = info->next;
  230: DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,

/linux-master/drivers/gpu/drm/i915/ |
i915_vma.c:
  11: * The above copyright notice and this permission notice (including the next
  1702: * the next idle point, or if the object is freed in the meantime. By
  1708: * on the next frame (or two, depending on the depth of the swap queue)
  1840: struct i915_vma *vma, *next; (local)
  1844: list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
  1863: list_for_each_entry_safe(vma, next, &closed, closed_link) {
  2022: /* Force a pagefault for domain tracking on next user access */
  2036: * before the next write.
  2142: * the next vma from the object, in case there are many, will

/linux-master/drivers/gpu/drm/i915/gt/ |
intel_execlists_submission.c:
  92: * the queue are next to be submitted but since a context may not occur twice in
  181: * scheduling -- each real engine takes the next available request
  351: const struct i915_request *next)
  363: return rq_prio(prev) >= rq_prio(next);
  636: * If this is part of a virtual engine, its next request may
  639: * switch (e.g. the next request is not runnable on this
  640: * engine). Hopefully, we will already have submitted the next
  853: * transferred onto the next idle engine.
  951: const struct intel_context *next)
  953: if (prev != next)
  350: assert_priority_queue(const struct i915_request *prev, const struct i915_request *next) (argument)
  950: can_merge_ctx(const struct intel_context *prev, const struct intel_context *next) (argument)
  967: can_merge_rq(const struct i915_request *prev, const struct i915_request *next) (argument)
  ... (more matches in this file)

intel_engine_pm.c:
  217: /* Check again on the next retirement. */
  247: struct llist_node *node, *next; (local)
  249: llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {

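The intel_engine_pm.c hit combines llist_del_all(), which atomically detaches an entire lock-free list, with llist_for_each_safe(), which caches each node's next pointer so the current node may be freed or reused mid-walk. A minimal sketch, assuming a kernel build context (barrier_task and drain_tasks are illustrative names):

#include <linux/llist.h>

struct barrier_task {                   /* illustrative */
        struct llist_node node;
        void (*fn)(struct barrier_task *t);
};

/* llist_del_all() detaches the whole list in one atomic exchange;
 * the safe iterator saves 'next' so fn() may free its node. */
static void drain_tasks(struct llist_head *head)
{
        struct llist_node *node, *next;

        llist_for_each_safe(node, next, llist_del_all(head)) {
                struct barrier_task *t =
                        llist_entry(node, struct barrier_task, node);

                t->fn(t);
        }
}
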
/linux-master/drivers/gpu/drm/i915/display/ |
intel_bios.c:
  11: * The above copyright notice and this permission notice (including the next
  351: static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, (argument)
  355: next->table_size = prev->table_size;
  356: next->offset = prev->offset + size;

/linux-master/drivers/firmware/efi/libstub/ |
x86-stub.c:
  76: rom->data.next = 0;
  150: while (data && data->next)
  151: data = (struct setup_data *)(unsigned long)data->next;
  167: data->next = (unsigned long)rom;
  216: new->next = 0;
  222: while (data->next)
  223: data = (struct setup_data *)(unsigned long)data->next;
  224: data->next = (unsigned long)new;
  233: unsigned long end, next; (local)
  268: for (end = start + size; start < end; start = next) {
  ... (more matches in this file)

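The x86-stub.c hits all walk the boot protocol's setup_data chain, a singly linked list threaded through physical addresses, out to its tail before appending a new node. A plain-C sketch of that walk-and-append (setup_data_stub is a stripped-down stand-in for the real struct setup_data, and the unsigned long cast mirrors the quoted lines, so this assumes pointers fit in an unsigned long):

#include <stdint.h>

/* Stripped-down stand-in for struct setup_data: 'next' holds the
 * physical address of the following node, 0 terminates the chain. */
struct setup_data_stub {
        uint64_t next;
        /* type, len, data[] omitted */
};

static void append_setup_data(struct setup_data_stub *head,
                              struct setup_data_stub *new_node)
{
        struct setup_data_stub *data = head;

        new_node->next = 0;             /* new tail terminates the chain */
        while (data->next)
                data = (struct setup_data_stub *)(unsigned long)data->next;
        data->next = (unsigned long)new_node;
}
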
/linux-master/block/ |
blk.h:
  213: struct bio *next)
  216: struct bio_integrity_payload *bip_next = bio_integrity(next);
  247: struct bio *next)
  340: struct request *next);
  212: integrity_req_gap_back_merge(struct request *req, struct bio *next) (argument)
  246: integrity_req_gap_back_merge(struct request *req, struct bio *next) (argument)

blk-mq.c:
  843: struct bio *next = bio->bi_next; (local)
  853: bio = next;
  876: * If @req has leftover, sets it up for the next range of segments.
  1126: struct request *rq, *next; (local)
  1128: llist_for_each_entry_safe(rq, next, entry, ipi_list)
  1464: rq = list_entry(rq_list.next, struct request, queuelist);
  1481: rq = list_entry(flush_list.next, struct request, queuelist);
  1558: unsigned long next; (member of struct blk_expired_data)
  1575: if (expired->next == 0)
  1576: expired->next ...
  4050: struct blk_mq_hw_ctx *hctx, *next; (local)
  ... (more matches in this file)

blk-merge.c:
  53: struct request *prev_rq, struct bio *prev, struct bio *next)
  77: * - if 'pb' ends unaligned, the next bio must include
  82: bio_get_first_bvec(next, &nb);
  132: * If the next starting sector would be misaligned, stop the discard at
  670: struct request *next)
  676: if (blk_rq_sectors(req) + bio_sectors(next->bio) >
  680: req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
  688: struct request *next)
  692: if (req_gap_back_merge(req, next->bio))
  698: if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  52: bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) (argument)
  669: req_attempt_discard_merge(struct request_queue *q, struct request *req, struct request *next) (argument)
  687: ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) (argument)
  786: blk_try_req_merge(struct request *req, struct request *next) (argument)
  801: attempt_merge(struct request_queue *q, struct request *req, struct request *next) (argument)
  890: struct request *next = elv_latter_request(q, rq); (local)
  914: blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next) (argument)
  ... (more matches in this file)

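The blk-merge.c hits gate whether request req can absorb request next from the back: the two must be sector-contiguous and the combined size must stay within the queue's limit. A plain-C sketch of just that contiguity check (can_back_merge and its parameters are illustrative; the kernel's attempt_merge() checks considerably more, such as segment counts and integrity gaps):

#include <stdbool.h>
#include <stdint.h>

/* Back-merge gate in the spirit of the hits above: 'req' can absorb
 * 'next' only if it ends exactly where 'next' begins and the merged
 * request stays within the queue's size limit. */
static bool can_back_merge(uint64_t req_sector, unsigned int req_sectors,
                           uint64_t next_sector, unsigned int next_sectors,
                           unsigned int max_sectors)
{
        if (req_sector + req_sectors != next_sector)
                return false;           /* not contiguous */

        return req_sectors + next_sectors <= max_sectors;
}
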
/linux-master/tools/lib/bpf/ |
libbpf.c:
  9023: bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) (argument)
  9025: struct bpf_program *prog = next;
  10254: bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) (argument)
  10256: if (next == NULL) {
  10262: return __bpf_map__iter(next, obj, -1);

/linux-master/net/ipv4/ |
tcp.c:
  53: * Alan Cox : Fixed assorted sk->rqueue->next errors
  1826: /* worst case: skip to next skb. try to improve on this case below */
  1848: * mappable_offset: Bytes till next mappable frag, *not* counting bytes
  2147: skb = skb->next;
  2177: /* Either full batch, or we're about to go to next skb
  2688: int next = (int)new_state[sk->sk_state]; (local)
  2689: int ns = next & TCP_STATE_MASK;
  2693: return next & TCP_ACTION_FIN;

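The tcp.c hits at 2688-2693 read from new_state[], a table that packs the next TCP state into the low bits of each entry and an "emit a FIN" action flag into a high bit. A plain-C sketch of that flag-packed transition table (states, mask, and flag values here are illustrative, not the kernel's TCP_STATE_MASK/TCP_ACTION_FIN values):

/* Flag-packed transition table in the spirit of tcp_close_state():
 * the low bits of each entry name the next state, a high bit asks
 * the caller to emit a FIN.  All values here are illustrative. */
enum { ST_ESTABLISHED, ST_FIN_WAIT1, ST_CLOSE, ST_MAX };

#define ACTION_FIN      (1 << 7)
#define STATE_MASK      0x7f

static const unsigned char new_state[ST_MAX] = {
        [ST_ESTABLISHED] = ST_FIN_WAIT1 | ACTION_FIN,
        [ST_FIN_WAIT1]   = ST_FIN_WAIT1,
        [ST_CLOSE]       = ST_CLOSE,
};

/* Returns nonzero when the transition requires sending a FIN. */
static int close_state(int *state)
{
        int next = new_state[*state];

        *state = next & STATE_MASK;
        return next & ACTION_FIN;
}
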
nexthop.c:
  592: struct rb_node **pp, *parent = NULL, *next; (local)
  598: next = rcu_dereference_raw(*pp);
  599: if (!next)
  601: parent = next;
  605: pp = &next->rb_left;
  607: pp = &next->rb_right;
  1763: * occupied, or it belongs to a next hop that is
  1765: * corresponding underweight next hop.
  1811: /* Deadline is the next time that upkeep should be run. It is the
  1848: /* If the group is still unbalanced, schedule the next upkeep ...
  2533: struct rb_node **pp, *parent = NULL, *next; (local)
  ... (more matches in this file)

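The nexthop.c hits at 592-607 are the classic rb-tree insertion walk: descend while remembering the parent and the link slot pp, then splice the new node in with rb_link_node()/rb_insert_color(). A minimal sketch of that walk, assuming a kernel build context and leaving out the RCU annotation (rcu_dereference_raw) the original uses; nh_stub and keying by id are illustrative:

#include <linux/rbtree.h>
#include <linux/types.h>

struct nh_stub {                /* illustrative keyed node */
        struct rb_node rb;
        u32 id;
};

/* Descend while tracking the parent and the link slot '*pp', the way
 * the hits at 592-607 do, then splice the new node in. */
static bool nh_insert(struct rb_root *root, struct nh_stub *nh)
{
        struct rb_node **pp = &root->rb_node, *parent = NULL, *next;

        while ((next = *pp) != NULL) {
                struct nh_stub *cur = rb_entry(next, struct nh_stub, rb);

                parent = next;
                if (nh->id < cur->id)
                        pp = &next->rb_left;
                else if (nh->id > cur->id)
                        pp = &next->rb_right;
                else
                        return false;   /* id already present */
        }

        rb_link_node(&nh->rb, parent, pp);
        rb_insert_color(&nh->rb, root);
        return true;
}
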
inet_fragment.c:
  279: struct sk_buff *next = FRAG_CB(skb)->next_frag; (local)
  283: skb = next;
  531: nextp = &clone->next;
  574: /* fp points to the next sk_buff in the current run;
  575: * rbn points to the next run.
  602: nextp = &fp->next;
  607: /* Move to the next run. */

/linux-master/net/core/ |
sock.c:
  2972: struct sk_buff *skb, *next; (local)
  2980: next = skb->next;
  2981: prefetch(next);
  2988: skb = next;
  4120: .next = proto_seq_next,

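The sock.c hits at 2972-2988 walk a detached sk_buff chain, saving skb->next and prefetching it before the current buffer is consumed, since the skb may be freed inside the loop body. A minimal sketch, assuming a kernel build context (flush_backlog_chain is an illustrative name and simply frees each skb, unlike the real receive-path code):

#include <linux/skbuff.h>
#include <linux/prefetch.h>

/* Walk a detached skb chain; 'next' is saved and prefetched before
 * the current skb is freed, so the loop never touches freed memory. */
static void flush_backlog_chain(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                prefetch(next);
                skb_mark_not_on_list(skb);      /* clears skb->next */
                kfree_skb(skb);
                skb = next;
        }
}
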
/linux-master/kernel/trace/ |
trace_probe.c:
  195: * Set the error position is next to the last arg + space.
  528: char *next; (local)
  548: next = NULL;
  549: is_ptr = split_next_field(fieldname, &next, ctx);
  578: ctx->offset += next - fieldname;
  579: fieldname = next;

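The trace_probe.c hits advance a parse cursor one field component at a time: split off the next component, account for the consumed bytes, then continue from next. A plain-C sketch of that split-and-advance loop using strchr() (walk_fields is illustrative; the kernel's split_next_field() also classifies pointer accesses and reports errors):

#include <stdio.h>
#include <string.h>

/* Consume a dotted field path ("a.b.c") one component per iteration,
 * advancing the cursor to 'next' the way the hits above do. */
static void walk_fields(char *path)
{
        char *field = path, *next;

        while (field) {
                next = strchr(field, '.');
                if (next)
                        *next++ = '\0'; /* terminate the current component */

                printf("component: %s\n", field);
                field = next;           /* NULL after the last component */
        }
}
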
/linux-master/kernel/ |
crash_reserve.c:
  239: goto next;
  247: next:
|