Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/ehca/

Lines Matching refs:pginfo
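
Note: the ehca_mr_pginfo structure that every match below populates or consumes can be reconstructed from the field accesses in this listing (type, num_kpages, kpage_cnt, hwpage_size, num_hwpages, hwpage_cnt, next_hwpage, and the per-type members under u.phy, u.usr and u.fmr). The layout sketched here follows those accesses; the enum name and the field types are assumptions in the style of the 2.6.36-era ehca driver, not taken from this listing.

    enum ehca_mr_pgi_type {
            EHCA_MR_PGI_PHYS,       /* caller-supplied phys_buf_array */
            EHCA_MR_PGI_USER,       /* pinned user memory (ib_umem) */
            EHCA_MR_PGI_FMR         /* fast memory region page list */
    };

    struct ehca_mr_pginfo {
            enum ehca_mr_pgi_type type;
            u64 num_kpages;         /* total kernel pages to register */
            u64 kpage_cnt;          /* kernel pages emitted so far */
            u64 hwpage_size;        /* HCA hardware page size */
            u64 num_hwpages;        /* total hardware pages to register */
            u64 hwpage_cnt;         /* hardware pages emitted so far */
            u64 next_hwpage;        /* hw-page cursor in current buffer */
            union {
                    struct {        /* EHCA_MR_PGI_PHYS */
                            int num_phys_buf;
                            struct ib_phys_buf *phys_buf_array;
                            u64 next_buf;
                    } phy;
                    struct {        /* EHCA_MR_PGI_USER */
                            struct ib_umem *region;
                            struct ib_umem_chunk *next_chunk;
                            u64 next_nmap;
                    } usr;
                    struct {        /* EHCA_MR_PGI_FMR */
                            u64 fmr_pgsize;
                            u64 *page_list;
                            u64 next_listelem;
                    } fmr;
            } u;
    };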

267 struct ehca_mr_pginfo pginfo;
278 memset(&pginfo, 0, sizeof(pginfo));
279 pginfo.type = EHCA_MR_PGI_PHYS;
280 pginfo.num_kpages = num_kpages;
281 pginfo.hwpage_size = hw_pgsize;
282 pginfo.num_hwpages = num_hwpages;
283 pginfo.u.phy.num_phys_buf = num_phys_buf;
284 pginfo.u.phy.phys_buf_array = phys_buf_array;
285 pginfo.next_hwpage =
289 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
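
A minimal sketch of how the counts assigned at lines 280-282 are typically derived just before this block. NUM_CHUNKS is the round-up division helper visible at lines 1121-1123; its definition and the iova_start/size/hw_pgsize call sites below are assumptions, not part of this listing.

    #define NUM_CHUNKS(length, chunk_size) \
            (((length) + (chunk_size) - 1) / (chunk_size))

    /* page counts must cover the offset of iova_start inside its
     * first kernel/hardware page as well as the region itself */
    num_kpages  = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size, PAGE_SIZE);
    num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size, hw_pgsize);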
322 struct ehca_mr_pginfo pginfo;
396 memset(&pginfo, 0, sizeof(pginfo));
397 pginfo.type = EHCA_MR_PGI_USER;
398 pginfo.hwpage_size = hwpage_size;
399 pginfo.num_kpages = num_kpages;
400 pginfo.num_hwpages = num_hwpages;
401 pginfo.u.usr.region = e_mr->umem;
402 pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
403 pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
408 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
410 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
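
Line 410 shows a fallback: if registration with a hardware page size above PAGE_SIZE fails with -EINVAL (the pinned kernel pages were not contiguous enough to back a large hw page), the driver can retry with base-size pages. A hedged sketch of that retry; the label and restart point are assumptions consistent with lines 396-408:

    if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
            /* assumed: redo the pginfo setup of lines 396-402 with
             * hwpage_size = PAGE_SIZE, making num_hwpages equal
             * num_kpages, then reissue the call at line 408 */
            hwpage_size = PAGE_SIZE;
            goto reg_user_mr_fallback;      /* assumed label */
    }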
464 struct ehca_mr_pginfo pginfo;
557 memset(&pginfo, 0, sizeof(pginfo));
558 pginfo.type = EHCA_MR_PGI_PHYS;
559 pginfo.num_kpages = num_kpages;
560 pginfo.hwpage_size = hw_pgsize;
561 pginfo.num_hwpages = num_hwpages;
562 pginfo.u.phy.num_phys_buf = num_phys_buf;
563 pginfo.u.phy.phys_buf_array = phys_buf_array;
564 pginfo.next_hwpage =
573 new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
776 struct ehca_mr_pginfo pginfo;
824 memset(&pginfo, 0, sizeof(pginfo));
825 pginfo.hwpage_size = hw_pgsize;
827 * pginfo.num_hwpages==0, ie register_rpages() will not be called
832 mr_access_flags, e_pd, &pginfo,
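
The comment fragment at line 827 is explained by the guard matched at line 1110: with num_hwpages left at zero after the memset at line 824, ehca_reg_mr_rpages() bails out before issuing any rpage hypercalls. The early return shown here is an assumption consistent with that guard:

    if (!pginfo->num_hwpages)       /* in case of fmr */
            return 0;               /* nothing to register yet */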
865 struct ehca_mr_pginfo pginfo;
891 memset(&pginfo, 0, sizeof(pginfo));
892 pginfo.type = EHCA_MR_PGI_FMR;
893 pginfo.num_kpages = list_len;
894 pginfo.hwpage_size = e_fmr->hwpage_size;
895 pginfo.num_hwpages =
896 list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
897 pginfo.u.fmr.page_list = page_list;
898 pginfo.next_hwpage =
899 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
900 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
904 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
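
Worked example for the FMR arithmetic at lines 895-899, with assumed values:

    /* fmr_page_size = 4096, hwpage_size = 4096, list_len = 16,
     * iova = 0x10000:
     *   num_hwpages = 16 * 4096 / 4096 = 16  (one hwpage per FMR page)
     *   next_hwpage = (0x10000 & 0xfff) / 4096 = 0  (aligned iova)
     * With hwpage_size = 1024 instead: num_hwpages = 64, and an iova
     * of 0x10400 gives next_hwpage = 0x400 / 1024 = 1. */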
1019 struct ehca_mr_pginfo *pginfo);
1027 struct ehca_mr_pginfo *pginfo,
1038 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
1055 ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
1057 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
1065 e_mr->num_kpages = pginfo->num_kpages;
1066 e_mr->num_hwpages = pginfo->num_hwpages;
1067 e_mr->hwpage_size = pginfo->hwpage_size;
1080 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
1082 hipzout.lkey, pginfo, pginfo->num_kpages,
1083 pginfo->num_hwpages, ret);
1090 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1092 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1093 pginfo->num_kpages, pginfo->num_hwpages);
1101 struct ehca_mr_pginfo *pginfo)
1110 if (!pginfo->num_hwpages) /* in case of fmr */
1121 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
1123 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1124 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
1130 ret = ehca_set_pagebuf(pginfo, rnum, kpage);
1151 ehca_encode_hwpage_size(pginfo->hwpage_size),
1154 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1189 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
1191 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
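
Sketch of the chunked loop visible at lines 1121-1130: hardware pages are fed to the HCA at most MAX_RPAGES at a time, with the final iteration sized by the remainder. The rnum bookkeeping between the matched lines is an assumption (the "last shot" comment at line 1124 implies a zero remainder means a full final chunk):

    for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
            if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
                    rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
                    if (rnum == 0)
                            rnum = MAX_RPAGES;      /* exact multiple */
            } else
                    rnum = MAX_RPAGES;

            ret = ehca_set_pagebuf(pginfo, rnum, kpage);
            /* kpage then goes to the rpages hypercall together with the
             * encoded hwpage size seen at line 1151 */
    }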
1203 struct ehca_mr_pginfo *pginfo,
1216 ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
1225 pginfo_save = *pginfo;
1226 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
1229 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
1230 "kpage=%p", e_mr, pginfo, pginfo->type,
1231 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1251 *pginfo = pginfo_save;
1265 e_mr->num_kpages = pginfo->num_kpages;
1266 e_mr->num_hwpages = pginfo->num_hwpages;
1267 e_mr->hwpage_size = pginfo->hwpage_size;
1280 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
1281 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
1282 pginfo->num_hwpages);
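
Lines 1225 and 1251 are a save/restore pair: ehca_set_pagebuf() advances pginfo's cursor fields (kpage_cnt, hwpage_cnt, next_hwpage and the per-type next_* members), so the in-place rereg path snapshots them first and rewinds if it has to fall back to a full re-registration. Condensed sketch; the control flow between the two matched lines is an assumption:

    pginfo_save = *pginfo;          /* line 1225: snapshot cursors */
    ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
    /* ... in-place rereg hypercall attempt elided; if it is rejected: */
    *pginfo = pginfo_save;          /* line 1251: rewind, then retry the
                                     * slower full-registration path */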
1294 struct ehca_mr_pginfo *pginfo,
1304 if ((pginfo->num_hwpages > MAX_RPAGES) ||
1306 (pginfo->num_hwpages > e_mr->num_hwpages)) {
1308 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
1309 pginfo->num_hwpages, e_mr->num_hwpages);
1324 acl, e_pd, pginfo, lkey, rkey);
1361 e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
1373 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1376 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1392 struct ehca_mr_pginfo pginfo;
1449 memset(&pginfo, 0, sizeof(pginfo));
1450 pginfo.type = EHCA_MR_PGI_FMR;
1453 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1536 struct ehca_mr_pginfo *pginfo)
1544 if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
1549 page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
1555 void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1560 ehca_encode_hwpage_size(pginfo->hwpage_size),
1573 struct ehca_mr_pginfo *pginfo)
1583 pginfo);
1592 struct ehca_mr_pginfo *pginfo)
1601 hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
1618 struct ehca_mr_pginfo pginfo;
1648 memset(&pginfo, 0, sizeof(pginfo));
1649 pginfo.type = EHCA_MR_PGI_PHYS;
1650 pginfo.num_kpages = num_kpages;
1651 pginfo.num_hwpages = num_hwpages;
1652 pginfo.hwpage_size = hw_pgsize;
1653 pginfo.u.phy.num_phys_buf = 1;
1654 pginfo.u.phy.phys_buf_array = &ib_pbuf;
1657 &pginfo, &e_mr->ib.ib_mr.lkey,
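
Lines 1653-1654 set up the internal maximum MR: unlike the caller-supplied arrays at lines 283-284 and 562-563, a single local ib_phys_buf describes the whole region. A hedged sketch of that descriptor; the addr/size values and the size_maxmr name are assumptions:

    struct ib_phys_buf ib_pbuf;     /* single descriptor, see line 1654 */
    ib_pbuf.addr = 0;               /* assumed: region starts at 0 */
    ib_pbuf.size = size_maxmr;      /* assumed name for the covered size */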
1852 /* PAGE_SIZE >= pginfo->hwpage_size */
1853 static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1863 int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1866 chunk = pginfo->u.usr.next_chunk;
1867 prev_chunk = pginfo->u.usr.next_chunk;
1869 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1870 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1874 (pginfo->next_hwpage *
1875 pginfo->hwpage_size));
1882 i, pginfo->next_hwpage);
1885 (pginfo->hwpage_cnt)++;
1886 (pginfo->next_hwpage)++;
1888 if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1889 (pginfo->kpage_cnt)++;
1890 (pginfo->u.usr.next_nmap)++;
1891 pginfo->next_hwpage = 0;
1897 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
1899 pginfo->u.usr.next_nmap = 0;
1902 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1903 pginfo->u.usr.next_nmap = 0;
1910 pginfo->u.usr.next_chunk =
1912 (&(pginfo->u.usr.region->chunk_list)),
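
In this PAGE_SIZE >= hwpage_size variant (comment at line 1852), each pinned kernel page is split into hwpages_per_kpage hardware pages; the counters at lines 1885-1891 advance the hw-page cursor within the current kernel page and step to the next nmap entry once it wraps. Worked example with assumed sizes:

    /* PAGE_SIZE = 4096, hwpage_size = 1024 -> hwpages_per_kpage = 4:
     * each kernel page emits pgaddr + {0,1,2,3} * 1024, and kpage_cnt
     * and next_nmap advance only when next_hwpage wraps back to 0
     * (line 1888). With hwpage_size = 4096 the ratio is 1 and every
     * hardware page also completes a kernel page. */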
1942 /* PAGE_SIZE < pginfo->hwpage_size */
1943 static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1953 int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
1957 chunk = pginfo->u.usr.next_chunk;
1958 prev_chunk = pginfo->u.usr.next_chunk;
1960 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1961 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1976 if (pgaddr & (pginfo->hwpage_size - 1)) {
1977 if (pginfo->hwpage_cnt) {
1983 pginfo->hwpage_size);
1988 pginfo->kpage_cnt =
1990 (pginfo->hwpage_size - 1)) >>
1992 nr_kpages -= pginfo->kpage_cnt;
1995 ~(pginfo->hwpage_size - 1));
2006 pginfo->kpage_cnt++;
2007 pginfo->u.usr.next_nmap++;
2018 pginfo->kpage_cnt += chunk->nmap - i;
2019 pginfo->u.usr.next_nmap += chunk->nmap - i;
2029 pginfo->kpage_cnt += nr_kpages;
2030 pginfo->u.usr.next_nmap += nr_kpages;
2033 (pginfo->hwpage_cnt)++;
2038 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
2040 pginfo->u.usr.next_nmap = 0;
2043 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
2044 pginfo->u.usr.next_nmap = 0;
2051 pginfo->u.usr.next_chunk =
2053 (&(pginfo->u.usr.region->chunk_list)),
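
The PAGE_SIZE < hwpage_size variant (comment at line 1942) works the other way around: kpages_per_hwpage pinned kernel pages are folded into one hardware page, which only works if they are physically contiguous and the run starts hwpage-aligned, hence the alignment test at line 1976 and the partial-page accounting at lines 1988-1992. Worked example with assumed sizes:

    /* PAGE_SIZE = 4096, hwpage_size = 65536 -> kpages_per_hwpage = 16:
     * one hardware page is emitted per 16 contiguous kernel pages
     * (kpage_cnt += 16 at line 2029, hwpage_cnt++ at line 2033). If
     * pgaddr & 0xffff != 0 on the first page, the region starts inside
     * a hardware page and only the tail kpages count toward it. */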
2058 static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
2068 pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
2069 num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
2070 pbuf->size, pginfo->hwpage_size);
2071 offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
2072 pginfo->hwpage_size;
2073 while (pginfo->next_hwpage < offs_hw + num_hw) {
2075 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
2076 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
2081 pginfo->kpage_cnt,
2082 pginfo->num_kpages,
2083 pginfo->hwpage_cnt,
2084 pginfo->num_hwpages, i);
2088 (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
2089 (pginfo->next_hwpage * pginfo->hwpage_size));
2093 pbuf->size, pginfo->next_hwpage);
2096 (pginfo->hwpage_cnt)++;
2097 (pginfo->next_hwpage)++;
2098 if (PAGE_SIZE >= pginfo->hwpage_size) {
2099 if (pginfo->next_hwpage %
2100 (PAGE_SIZE / pginfo->hwpage_size) == 0)
2101 (pginfo->kpage_cnt)++;
2103 pginfo->kpage_cnt += pginfo->hwpage_size /
2109 if (pginfo->next_hwpage >= offs_hw + num_hw) {
2110 (pginfo->u.phy.next_buf)++;
2111 pginfo->next_hwpage = 0;
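
The branch at lines 2098-2103 keeps the kernel-page counter consistent whichever page size is larger. Worked example with assumed sizes:

    /* PAGE_SIZE = 4096, hwpage_size = 4096: PAGE_SIZE / hwpage_size = 1,
     * so next_hwpage % 1 == 0 always holds and kpage_cnt++ fires on
     * every hardware page.
     * PAGE_SIZE = 4096, hwpage_size = 65536: the else branch at line
     * 2103 adds hwpage_size / PAGE_SIZE = 16 kernel pages at once. */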
2117 static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
2125 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
2127 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
2128 pginfo->next_hwpage * pginfo->hwpage_size);
2133 pginfo->u.fmr.next_listelem,
2134 pginfo->next_hwpage);
2137 (pginfo->hwpage_cnt)++;
2138 if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
2139 if (pginfo->next_hwpage %
2140 (pginfo->u.fmr.fmr_pgsize /
2141 pginfo->hwpage_size) == 0) {
2142 (pginfo->kpage_cnt)++;
2143 (pginfo->u.fmr.next_listelem)++;
2145 pginfo->next_hwpage = 0;
2147 (pginfo->next_hwpage)++;
2149 unsigned int cnt_per_hwpage = pginfo->hwpage_size /
2150 pginfo->u.fmr.fmr_pgsize;
2156 ~(pginfo->hwpage_size - 1));
2157 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
2165 pginfo->kpage_cnt += cnt_per_hwpage;
2166 pginfo->u.fmr.next_listelem += cnt_per_hwpage;
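
The FMR variant mirrors the two user-space cases: when fmr_pgsize >= hwpage_size (line 2138), one FMR list element spans several hardware pages and next_listelem advances as next_hwpage wraps; otherwise cnt_per_hwpage list elements back one hardware page, and the test at lines 2156-2157 rejects entries that are not physically consecutive. Worked example with assumed sizes:

    /* fmr_pgsize = 4096, hwpage_size = 65536 -> cnt_per_hwpage = 16:
     * entries page_list[k] .. page_list[k+15] must satisfy
     *     page_list[k+j] == page_list[k] + j * 4096
     * (the "prev + fmr_pgsize != p" check at line 2157); on success
     * kpage_cnt and next_listelem both advance by 16 (lines 2165-2166). */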
2175 int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
2181 switch (pginfo->type) {
2183 ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
2186 ret = PAGE_SIZE >= pginfo->hwpage_size ?
2187 ehca_set_pagebuf_user1(pginfo, number, kpage) :
2188 ehca_set_pagebuf_user2(pginfo, number, kpage);
2191 ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
2194 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
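
How a caller drives the dispatcher at line 2175, pieced together from lines 1121-1130: number is at most MAX_RPAGES per call, kpage points at a buffer receiving that many translated hardware-page addresses, and pginfo carries cursor state between calls. Hedged usage sketch; the allocation helper is named after ehca conventions and is an assumption here:

    u64 *kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);  /* assumed helper */
    if (!kpage)
            return -ENOMEM;
    /* extract up to rnum (<= MAX_RPAGES) translated addresses */
    ret = ehca_set_pagebuf(pginfo, rnum, kpage);
    /* ret != 0 covers bad pginfo->type (line 2194) and extraction errors */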
2469 struct ehca_mr_pginfo *pginfo)
2482 hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
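
The remaining matches (lines 1536-1601 and 2469-2482) belong to the bmap-based path used for the internal maximum MR. Condensed call chain, reconstructed from the matched signatures; the nesting itself is an assumption:

    /* ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo)            line 1055
     *   -> ehca_reg_mr_dir_sections(top, kpage, ...)         line 2482
     *     -> ehca_reg_mr_sections(top, dir, kpage, ...)      line 1601
     *       -> per section: EHCA_SECTSIZE / pginfo->hwpage_size
     *          hardware pages (line 1549), after checking that the
     *          section base is hwpage-aligned (line 1544). */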