Lines Matching refs:cycle

225 	// of syt interval. This comes from the interval of isoc cycle. As 1394
477 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
480 unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
527 dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
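
The matches at 477-527 cover compute_syt_offset(), which turns a received SYT value plus the cycle in which the packet arrived into a tick offset relative to that cycle, minus the transfer delay. A minimal sketch of that idea, assuming the IEEE 1394 constants (8000 cycles per second, 3072 ticks per cycle) and a 16-bit SYT layout with 4 cycle bits above 12 offset bits; the helper below is illustrative, not the file's exact code:

/* Illustrative only: derive a tick offset from a 16-bit SYT value and the
 * cycle in which the packet arrived. Constants follow IEEE 1394. */
#include <stdio.h>

#define CYCLES_PER_SECOND	8000
#define TICKS_PER_CYCLE		3072
#define TICKS_PER_SECOND	(CYCLES_PER_SECOND * TICKS_PER_CYCLE)

unsigned int syt_to_offset(unsigned int syt, unsigned int cycle,
			   unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int offset;

	/* The SYT cycle bits wrap every 16 cycles; unwrap them relative to
	 * the low bits of the arrival cycle. */
	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += 16;
	offset = (syt_cycle_lo - cycle_lo) * TICKS_PER_CYCLE + (syt & 0x0fff);

	/* Keep the result non-negative after removing the transfer delay. */
	if (offset < transfer_delay)
		offset += TICKS_PER_SECOND;
	return offset - transfer_delay;
}

int main(void)
{
	/* SYT 0x34b0 arriving in cycle 1234, delay of two cycles' worth of ticks. */
	printf("%u\n", syt_to_offset(0x34b0, 1234, 2 * TICKS_PER_CYCLE));
	return 0;
}
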
680 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
700 trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
803 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
838 	// Handle the cycle so that an empty packet arrives.
852 trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
872 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
874 cycle += addend;
875 if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
876 cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
877 return cycle;
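
increment_ohci_cycle_count() at 872-877 is shown in full; it wraps at OHCI_SECOND_MODULUS * CYCLES_PER_SECOND. A standalone check of that wrap, assuming the usual values of 8 and 8000 for those constants:

/* Assumed constant values; the function body follows lines 874-877. */
#include <assert.h>

#define OHCI_SECOND_MODULUS	8
#define CYCLES_PER_SECOND	8000

static unsigned int increment_cycle_count(unsigned int cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}

int main(void)
{
	/* 63999 is the last representable cycle; adding one wraps to 0. */
	assert(increment_cycle_count(63999, 1) == 0);
	assert(increment_cycle_count(100, 32) == 132);
	return 0;
}
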
898 // Align to actual cycle count for the packet which is going to be scheduled.
899 	// This module queues the same number of isochronous cycles as the size of the queue
900 	// to skip isochronous cycles, therefore it's OK to just increment the cycle by
901 	// the size of the queue for the scheduled cycle.
905 u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
906 return increment_ohci_cycle_count(cycle, queue_size);
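
The comment at 898-901 and compute_ohci_it_cycle() at 905-906 describe aligning to the cycle in which a packet being queued now will actually be sent: flatten the context-header timestamp into a cycle count, then add the queue size. A sketch under the assumption (per the comment at 1080) that the timestamp carries 3 second bits above a 13-bit cycle field:

/* Sketch only: flatten an OHCI context-header timestamp and add the queue
 * size to get the scheduled cycle. Field layout and constants are assumed. */
#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_SECOND	8000
#define OHCI_SECOND_MODULUS	8

uint32_t tstamp_to_cycle(uint32_t tstamp)
{
	uint32_t sec = (tstamp >> 13) & 0x07;	/* lower 3 bits of second field */
	uint32_t cyc = tstamp & 0x1fff;		/* cycle field, 0..7999 */

	return sec * CYCLES_PER_SECOND + cyc;
}

uint32_t scheduled_cycle(uint32_t tstamp, unsigned int queue_size)
{
	/* The queue holds queue_size packets, one per isochronous cycle, so
	 * the packet prepared now goes out queue_size cycles later. */
	return (tstamp_to_cycle(tstamp) + queue_size) %
	       (OHCI_SECOND_MODULUS * CYCLES_PER_SECOND);
}

int main(void)
{
	/* Second 7, cycle 7990, with 16 packets in flight: wraps to cycle 6. */
	printf("%u\n", (unsigned int)scheduled_cycle((7u << 13) | 7990, 16));
	return 0;
}
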
926 unsigned int cycle;
931 cycle = compute_ohci_cycle_count(ctx_header[1]);
932 lost = (next_cycle != cycle);
935 // Fireface skips transmission just for an isoc cycle corresponding
940 lost = (next_cycle != cycle);
942 // Prepare a description for the skipped cycle for
944 desc->cycle = prev_cycle;
958 lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
961 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
962 next_cycle, cycle);
967 err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
972 desc->cycle = cycle;
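
The matches at 926-972 are the lost-packet check in the IR context: the cycle decoded from each context header is compared with the expected next_cycle, with an allowance for devices (Fireface) that legitimately skip a cycle, for which a descriptor is fabricated. A simplified, hedged sketch of that check; the helper names and the single-cycle tolerance are assumptions, and the extra safe_cycle window at 958 is omitted:

/* Simplified sketch of the discontinuity check; not the file's exact logic. */
#include <stdbool.h>
#include <stdio.h>

#define OHCI_SECOND_MODULUS	8
#define CYCLES_PER_SECOND	8000

static unsigned int increment_cycle(unsigned int cycle, unsigned int addend)
{
	return (cycle + addend) % (OHCI_SECOND_MODULUS * CYCLES_PER_SECOND);
}

/* Returns true when the observed cycle matches the expectation, advancing
 * *next_cycle for the following packet. */
bool check_cycle(unsigned int *next_cycle, unsigned int cycle,
		 bool may_skip_one_cycle)
{
	bool lost = (*next_cycle != cycle);

	if (lost && may_skip_one_cycle) {
		/* Account for one skipped cycle as if an empty packet had
		 * arrived in it (cf. 942-944), then re-check. */
		*next_cycle = increment_cycle(*next_cycle, 1);
		lost = (*next_cycle != cycle);
	}

	if (lost) {
		fprintf(stderr, "discontinuity of cycle: %u %u\n",
			*next_cycle, cycle);
		return false;
	}

	*next_cycle = increment_cycle(*next_cycle, 1);
	return true;
}
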
994 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
1000 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
1021 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
1024 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
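
compute_syt() at 994-1024 does the reverse packing for IT packets: the presentation cycle (transmission cycle plus whole cycles of offset and delay) goes into the upper bits of SYT and the residual ticks into the lower 12. A sketch built around the expression shown at 1000; the final mask and argument handling are assumptions:

/* Sketch of the SYT packing; only the expression at 1000 is taken from the
 * file, the rest is assumed. */
#define TICKS_PER_CYCLE	3072

unsigned int pack_syt(unsigned int syt_offset, unsigned int cycle,
		      unsigned int transfer_delay)
{
	unsigned int offset = syt_offset + transfer_delay;
	unsigned int syt;

	/* Carry whole cycles out of the tick offset into the cycle count. */
	syt = ((cycle + offset / TICKS_PER_CYCLE) << 12) |
	      (offset % TICKS_PER_CYCLE);

	/* Only 4 cycle bits and 12 offset bits fit in the 16-bit SYT field. */
	return syt & 0xffff;
}
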
1074 latest_cycle = desc->cycle;
1080 // Compute cycle count with lower 3 bits of second field and cycle field like timestamp
1085 // NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since
1093 	// the most recent isochronous cycle has already been processed.
1099 // NOTE: The AMDTP packet descriptor should be for the future isochronous cycle
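
The notes at 1080-1099 compare packet descriptors, which carry cycle counts in the 0..63999 space above, against the current 1394 cycle timer. A sketch of mapping the CYCLE_TIME register into that space, assuming the standard layout (seconds in bits 31..25, cycle in 24..12, offset in 11..0):

/* Sketch: keep only the lower 3 second bits of the CYCLE_TIME register so
 * the result lands in the same 0..63999 space as the context-header cycles.
 * Register layout per IEEE 1394; the helper name is illustrative. */
#include <stdint.h>

#define CYCLES_PER_SECOND	8000

uint32_t cycle_time_to_ohci_cycle(uint32_t cycle_time)
{
	uint32_t sec = (cycle_time >> 25) & 0x07;	/* lower 3 bits of seconds */
	uint32_t cyc = (cycle_time >> 12) & 0x1fff;	/* cycle count, 0..7999 */

	return sec * CYCLES_PER_SECOND + cyc;
}
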
1189 build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
1220 unsigned int cycle;
1228 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1229 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1265 unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
1267 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1355 unsigned int cycle;
1364 cycle = compute_ohci_cycle_count(ctx_header[1]);
1365 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1394 unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
1396 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1486 // Decide the cycle count to begin processing content of packet in IR contexts.
1490 unsigned int cycle = UINT_MAX;
1509 if (cycle == UINT_MAX ||
1510 compare_ohci_cycle_count(next_cycle, cycle) > 0)
1511 cycle = next_cycle;
1516 d->processing_cycle.tx_start = cycle;
1598 // Decide the cycle count to begin processing content of packet in IT contexts. All of IT
1601 unsigned int cycle = s->next_cycle;
1606 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1607 cycle = s->next_cycle;
1615 d->processing_cycle.rx_start = cycle;
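
Both start-cycle decisions (1486-1516 for IR tx_start, 1598-1615 for IT rx_start) pick the latest next_cycle among the streams of the domain, so no context starts processing content before every context has reached that cycle. A sketch of that selection over an array of per-stream cycles; the modular comparison mirrors the role of compare_ohci_cycle_count() but is not its exact implementation:

/* Sketch only: pick the latest cycle in the wrap-around 0..63999 space. */
#include <limits.h>

#define OHCI_SECOND_MODULUS	8
#define CYCLES_PER_SECOND	8000

/* Positive when lval is later than rval, modulo the 64000-cycle window. */
static int compare_cycle(unsigned int lval, unsigned int rval)
{
	int diff = (int)lval - (int)rval;
	int half = OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2;

	if (diff >= half)
		diff -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	else if (diff < -half)
		diff += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return diff;
}

unsigned int pick_start_cycle(const unsigned int *next_cycles, int count)
{
	unsigned int cycle = UINT_MAX;
	int i;

	for (i = 0; i < count; ++i) {
		if (cycle == UINT_MAX ||
		    compare_cycle(next_cycles[i], cycle) > 0)
			cycle = next_cycles[i];
	}
	return cycle;
}
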
1855 	// Process isochronous packets queued up to the recent isochronous cycle to handle PCM frames.
1878 // Process isochronous packets for recent isochronous cycle to handle