Lines Matching defs:last_index

46  * ULONG_MAX so last_index + 1 cannot overflow.
134 unsigned long first_index, unsigned long last_index)
142 first_index, last_index);
152 iter->last_hole == iter->spans[0].last_index) {
183 unsigned long last_index,
186 unsigned long npages = last_index - start_index + 1;
240 unsigned long last_index)
245 iopt_area_index_to_iova_last(area, last_index) -
372 * reaches last_index, the function will return. The caller should use
378 unsigned long last_index)
387 while (start_index <= last_index) {
405 unsigned long last_index,
415 while (start_index <= last_index) {
430 unsigned long last_index)
435 batch_from_domain(batch, domain, area, start_index, last_index);
521 unsigned long last_index)
533 start_index == last_index)
542 unsigned long last_index)
556 if (start_index == last_index)
564 unsigned long last_index)
570 xas_for_each(&xas, entry, last_index)
576 unsigned long last_index, struct page **pages)
578 struct page **end_pages = pages + (last_index - start_index) + 1;
739 unsigned long last_index)
747 WARN_ON(last_index < start_index))
753 (last_index - start_index + 1) * sizeof(*user->upages);
772 npages = min_t(unsigned long, last_index - start_index + 1,
942 unsigned long last_index;
1026 return pfns->batch_start_index == pfns->last_index + 1;
1036 while (pfns->batch_end_index != pfns->last_index + 1) {
1063 unsigned long start_index, unsigned long last_index)
1072 pfns->last_index = last_index;
1074 rc = batch_init(&pfns->batch, last_index - start_index + 1);
1079 last_index);
1122 unsigned long start_index, unsigned long last_index)
1127 WARN_ON(last_index < start_index))
1130 rc = pfn_reader_init(pfns, pages, start_index, last_index);
1199 unsigned long start_index, unsigned long last_index,
1203 while (start_index <= last_index) {
1206 if (*unmapped_end_index <= last_index) {
1216 last_index);
1219 batch_last_index = last_index;
1234 if (batch_last_index == last_index &&
1235 last_index != real_last_index)
1237 last_index + 1,
1260 unsigned long last_index)
1284 batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
1287 last_index) {
1295 &unmapped_end_index, last_index);
1301 if (unmapped_end_index != last_index + 1)
1303 last_index);
1537 * @last_index: Last PFN index
1544 unsigned long last_index)
1555 last_index) {
1559 last_index - start_index + 1,
1581 * @last_index: The last page index in the range
1592 unsigned long last_index,
1599 while (start_index <= last_index) {
1612 unsigned long last_index,
1615 while (start_index != last_index + 1) {
1623 domain_last = min(iopt_area_last_index(area), last_index);
1635 unsigned long last_index,
1641 while (cur_index != last_index + 1) {
1643 rc = pfn_reader_user_pin(user, pages, cur_index, last_index);
1661 * @last_index: The last page index in the range
1672 unsigned long last_index, struct page **out_pages)
1682 user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages);
1685 last_index) {
1745 unsigned long last_index, unsigned long offset,
1754 rc = pfn_reader_first(&pfns, pages, start_index, last_index);
1835 unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE;
1847 if (start_index == last_index)
1851 return iopt_pages_rw_slow(pages, start_index, last_index,
1863 last_index,
1906 * @last_index: Inclusive last page index
1916 unsigned long last_index, struct page **out_pages,
1927 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1931 iopt_pages_fill_from_xarray(pages, start_index, last_index,
1943 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages);
1948 access->node.last = last_index;
1966 * @last_index: Inclusive last page index
1972 unsigned long last_index)
1978 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1989 iopt_pages_unfill_xarray(pages, start_index, last_index);
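
Read together, the matches above all follow one convention: a range of page indexes is described by a start_index and an inclusive last_index, its length is last_index - start_index + 1, and loops stop once an index reaches last_index + 1, which is safe because indexes stay below ULONG_MAX (the comment at line 46). Below is a minimal, self-contained userspace sketch of that convention; the names (span_npages, the loop variables) are hypothetical and this is not the iommufd implementation itself.

    /*
     * Sketch of the inclusive [start_index, last_index] convention seen in
     * the listing above. Hypothetical helper names; not kernel code.
     */
    #include <stdio.h>

    /*
     * Number of pages in an inclusive range. Valid because indexes are kept
     * below ULONG_MAX, so last_index + 1 cannot overflow.
     */
    static unsigned long span_npages(unsigned long start_index,
                                     unsigned long last_index)
    {
            return last_index - start_index + 1;
    }

    int main(void)
    {
            unsigned long start_index = 3, last_index = 7;
            unsigned long cur;

            printf("npages = %lu\n", span_npages(start_index, last_index));

            /* Both loop forms used in the listing visit the same indexes. */
            for (cur = start_index; cur <= last_index; cur++)
                    printf("visit %lu\n", cur);

            cur = start_index;
            while (cur != last_index + 1)   /* safe: last_index < ULONG_MAX */
                    cur++;
            printf("stopped at %lu\n", cur);
            return 0;
    }

Keeping the bound inclusive, rather than storing an exclusive end, also lines up with the interval-tree node assignment at line 1948 (access->node.last = last_index), where .last is an inclusive endpoint.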