Lines Matching refs:start_index

182 unsigned long start_index,
186 unsigned long npages = last_index - start_index + 1;
239 unsigned long start_index,
242 unsigned long start_iova = iopt_area_index_to_iova(area, start_index);
377 struct iopt_area *area, unsigned long start_index,
384 iova = iopt_area_index_to_iova(area, start_index);
385 if (start_index == iopt_area_index(area))
387 while (start_index <= last_index) {
398 start_index++;
404 unsigned long start_index,
412 iova = iopt_area_index_to_iova(area, start_index);
413 if (start_index == iopt_area_index(area))
415 while (start_index <= last_index) {
420 start_index++;
429 unsigned long start_index,
435 batch_from_domain(batch, domain, area, start_index, last_index);
476 struct iopt_area *area, unsigned long start_index)
488 if (start_index == iopt_area_index(area))
491 iopt_area_index_to_iova(area, start_index);
520 unsigned long start_index,
523 XA_STATE(xas, xa, start_index);
533 start_index == last_index)
535 start_index++;
541 unsigned long start_index,
544 XA_STATE(xas, xa, start_index);
556 if (start_index == last_index)
558 start_index++;
563 static void clear_xarray(struct xarray *xa, unsigned long start_index,
566 XA_STATE(xas, xa, start_index);
575 static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
578 struct page **end_pages = pages + (last_index - start_index) + 1;
580 XA_STATE(xas, xa, start_index);
608 if (xas.xa_index != start_index)
609 clear_xarray(xa, start_index, xas.xa_index - 1);
738 unsigned long start_index,
747 WARN_ON(last_index < start_index))
753 (last_index - start_index + 1) * sizeof(*user->upages);
772 npages = min_t(unsigned long, last_index - start_index + 1,
779 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
798 user->upages_start = start_index;
799 user->upages_end = start_index + rc;
979 unsigned long start_index = pfns->batch_end_index;
984 WARN_ON(span->last_used < start_index))
989 start_index, span->last_used);
999 area = iopt_pages_find_domain_area(pfns->pages, start_index);
1005 &pfns->batch, area->storage_domain, area, start_index,
1010 if (start_index >= pfns->user.upages_end) {
1011 rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index,
1019 (start_index - pfns->user.upages_start),
1020 pfns->user.upages_end - start_index);
1063 unsigned long start_index, unsigned long last_index)
1070 pfns->batch_start_index = start_index;
1071 pfns->batch_end_index = start_index;
1074 rc = batch_init(&pfns->batch, last_index - start_index + 1);
1078 &pages->domains_itree, start_index,
1122 unsigned long start_index, unsigned long last_index)
1127 WARN_ON(last_index < start_index))
1130 rc = pfn_reader_init(pfns, pages, start_index, last_index);
1199 unsigned long start_index, unsigned long last_index,
1203 while (start_index <= last_index) {
1208 max(start_index, *unmapped_end_index);
1214 start_index);
1217 batch_last_index = start_index + batch->total_pfns - 1;
1243 start_index + batch->total_pfns - 1);
1244 *unmapped_end_index = start_index + batch->total_pfns;
1249 batch_last_index - start_index + 1);
1250 start_index = batch_last_index + 1;
1263 unsigned long start_index = iopt_area_index(area);
1264 unsigned long unmapped_end_index = start_index;
1286 &pages->access_itree, start_index,
1521 unsigned long start_index,
1524 while (start_index <= end_index) {
1525 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index,
1528 start_index += batch->total_pfns;
1536 * @start_index: Starting PFN index
1543 unsigned long start_index,
1554 &pages->domains_itree, start_index,
1559 last_index - start_index + 1,
1580 * @start_index: The first page index in the range
1591 unsigned long start_index,
1595 XA_STATE(xas, &pages->pinned_pfns, start_index);
1599 while (start_index <= last_index) {
1605 start_index++;
1611 unsigned long start_index,
1615 while (start_index != last_index + 1) {
1619 area = iopt_pages_find_domain_area(pages, start_index);
1625 start_index, domain_last,
1627 start_index = domain_last + 1;
1634 unsigned long start_index,
1638 unsigned long cur_index = start_index;
1642 user->upages = out_pages + (cur_index - start_index);
1651 if (start_index != cur_index)
1652 iopt_pages_err_unpin(pages, start_index, cur_index - 1,
1660 * @start_index: The first page index in the range
1671 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index,
1675 unsigned long xa_end = start_index;
1682 user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages);
1684 &pages->domains_itree, start_index,
1689 cur_pages = out_pages + (span.start_used - start_index);
1696 cur_pages = out_pages + (span.start_used - start_index);
1709 cur_pages = out_pages + (span.start_hole - start_index);
1731 if (start_index != xa_end)
1732 iopt_pages_unfill_xarray(pages, start_index, xa_end - 1);
1744 unsigned long start_index,
1754 rc = pfn_reader_first(&pfns, pages, start_index, last_index);
1834 unsigned long start_index = start_byte / PAGE_SIZE;
1847 if (start_index == last_index)
1848 return iopt_pages_rw_page(pages, start_index,
1851 return iopt_pages_rw_slow(pages, start_index, last_index,
1862 return iopt_pages_rw_slow(pages, start_index,
1905 * @start_index: First page index
1915 int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
1927 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1931 iopt_pages_fill_from_xarray(pages, start_index, last_index,
1943 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages);
1947 access->node.start = start_index;
1965 * @start_index: First page index
1971 void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
1978 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1989 iopt_pages_unfill_xarray(pages, start_index, last_index);
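
Nearly every reference above follows the same convention: start_index and last_index describe an inclusive range of page indexes within an area, so the page count is last_index - start_index + 1, and an index is turned into an IOVA by offsetting from the area's base. The following is a minimal, self-contained sketch of that arithmetic only; the struct layout and helper names are illustrative stand-ins, not the iommufd definitions themselves.

/*
 * Illustrative sketch of the inclusive [start_index, last_index] page-range
 * convention visible in the references above. example_area and the helpers
 * below are hypothetical, not the kernel's iopt_area or its accessors.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct example_area {
	unsigned long iova;        /* IOVA of the area's first page */
	unsigned long first_index; /* page index of the area's first page */
};

/* "Index to IOVA": offset the index from the area's starting index. */
static unsigned long example_index_to_iova(const struct example_area *area,
					   unsigned long index)
{
	return area->iova + (index - area->first_index) * PAGE_SIZE;
}

/* The inclusive-range page count used throughout the listing. */
static unsigned long example_npages(unsigned long start_index,
				    unsigned long last_index)
{
	return last_index - start_index + 1;
}

int main(void)
{
	struct example_area area = { .iova = 0x100000, .first_index = 16 };
	unsigned long start_index = 18, last_index = 21;

	printf("npages = %lu\n", example_npages(start_index, last_index));
	printf("start iova = 0x%lx\n",
	       example_index_to_iova(&area, start_index));
	return 0;
}

With the values above the sketch reports 4 pages and a starting IOVA of 0x102000, matching the "last_index - start_index + 1" and "iopt_area_index_to_iova(area, start_index)" patterns that recur in the matched lines.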