Lines matching refs: buf (drivers/xen/xen-front-pgdir-shbuf.c)

48 void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
51 void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
54 int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
58 int (*map)(struct xen_front_pgdir_shbuf *buf);
61 int (*unmap)(struct xen_front_pgdir_shbuf *buf);
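Read together, the matches at 48-61 outline the per-allocation-mode vtable. A reconstruction is sketched below; the member comments are my reading of the surrounding driver, not text from this listing, and the continuation parameters on line 54 are inferred from the call site at line 460:

struct xen_front_pgdir_shbuf_ops {
	/* How many grant refs this buffer needs: directory pages only,
	 * or directory plus data pages. */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
	/* Write grant refs into the page directory pages. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
	/* Grant the data pages themselves (frontend-allocated mode). */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head,
				     int gref_idx);
	/* Map/unmap grants provided by the backend (backend-allocated mode). */
	int (*map)(struct xen_front_pgdir_shbuf *buf);
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};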
70 * \param buf shared buffer whose page directory is of interest.
75 xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
77 if (!buf->grefs)
80 return buf->grefs[0];
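The elided branch at line 78 presumably returns INVALID_GRANT_REF when no grefs were allocated, which is exactly what line 77 guards. A hypothetical caller stringifies the returned gref and hands it to the backend over XenStore; the node name "page-directory-ref" below is illustrative, not from this listing:

grant_ref_t gref = xen_front_pgdir_shbuf_get_dir_start(&shbuf);
int ret;

if (gref == INVALID_GRANT_REF)
	return -ENOMEM;

/* Illustrative node name; each PV protocol defines its own. */
ret = xenbus_printf(XBT_NIL, xb_dev->nodename,
		    "page-directory-ref", "%u", gref);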
90 * references onto the backing storage (buf->pages).
92 * \param buf shared buffer whose grants are to be mapped.
95 int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
97 if (buf->ops && buf->ops->map)
98 return buf->ops->map(buf);
113 * \param buf shared buffer whose grants are to be unmapped.
116 int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
118 if (buf->ops && buf->ops->unmap)
119 return buf->ops->unmap(buf);
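Both entry points are thin dispatchers: the ops are only populated for buffers the backend allocated, so frontend-allocated buffers fall through to a successful no-op. A sketch of the map side with the elided tail filled in (the unmap side at 116-119 mirrors it):

int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->map)
		return buf->ops->map(buf);

	/* Nothing to map: the frontend granted its own pages. */
	return 0;
}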
129 * \param buf shared buffer whose resources are to be freed.
131 void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
133 if (buf->grefs) {
136 for (i = 0; i < buf->num_grefs; i++)
137 if (buf->grefs[i] != INVALID_GRANT_REF)
138 gnttab_end_foreign_access(buf->grefs[i], NULL);
140 kfree(buf->grefs);
141 kfree(buf->directory);
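The matches at 131-141 are enough to reconstruct the teardown: revoke every still-valid grant, then free the gref array and the directory storage. A sketch, with the closing brace and loop-variable declaration between the matched lines inferred:

void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != INVALID_GRANT_REF)
				gnttab_end_foreign_access(buf->grefs[i], NULL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
}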
156 * \param buf shared buffer.
158 static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
160 return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
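To make the arithmetic concrete, with sizes that are typical but assumed rather than taken from this listing: each directory page spends one grant_ref_t slot on its next-page link, leaving the rest for buffer grefs.

/* Assuming PAGE_SIZE == 4096 and sizeof(grant_ref_t) == 4:
 *   XEN_NUM_GREFS_PER_PAGE = (4096 - 4) / 4 = 1023
 *   256-page buffer:  DIV_ROUND_UP(256, 1023)  = 1 directory page
 *   2048-page buffer: DIV_ROUND_UP(2048, 1023) = 3 directory pages
 */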
167 * \param buf shared buffer.
169 static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
172 buf->num_grefs = get_num_pages_dir(buf);
179 * \param buf shared buffer.
181 static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
187 buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
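The two calc_num_grefs flavors differ only in whether the data pages need their own grefs: a backend-allocated buffer receives its data grefs from the backend, so only the directory is granted locally. Continuing the worked example above:

/* 256 data pages, one directory page (sizes assumed above):
 *   backend_calc_num_grefs: num_grefs = 1             (directory only)
 *   guest_calc_num_grefs:   num_grefs = 1 + 256 = 257 (directory + data)
 */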
197 * \param buf shared buffer.
200 static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
205 if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
208 unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
213 for (i = 0; i < buf->num_pages; i++) {
216 addr = xen_page_to_vaddr(buf->pages[i]);
218 buf->backend_map_handles[i]);
221 ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
222 buf->num_pages);
224 for (i = 0; i < buf->num_pages; i++) {
226 dev_err(&buf->xb_dev->dev,
232 dev_err(&buf->xb_dev->dev,
236 kfree(buf->backend_map_handles);
237 buf->backend_map_handles = NULL;
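Between the matches, backend_unmap batches one unmap op per page and submits them with a single gnttab_unmap_refs() call (line 221), then checks per-op status before dropping the saved handles. A sketch of the setup through line 218, assuming the standard grant-table helpers:

struct gnttab_unmap_grant_ref *unmap_ops;
int i, ret;

unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops), GFP_KERNEL);
if (!unmap_ops)
	return -ENOMEM;

for (i = 0; i < buf->num_pages; i++) {
	uint64_t addr = xen_page_to_vaddr(buf->pages[i]);

	gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
			    buf->backend_map_handles[i]);
}

ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages, buf->num_pages);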
244 * \param buf shared buffer.
247 static int backend_map(struct xen_front_pgdir_shbuf *buf)
253 map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
257 buf->backend_map_handles = kcalloc(buf->num_pages,
258 sizeof(*buf->backend_map_handles),
260 if (!buf->backend_map_handles) {
267 * buffer we only allocate buf->grefs for the page directory,
268 * so buf->num_grefs holds the number of pages in the page directory itself.
270 ptr = buf->directory;
271 grefs_left = buf->num_pages;
273 for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
284 addr = xen_page_to_vaddr(buf->pages[cur_page]);
288 buf->xb_dev->otherend_id);
295 ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
298 for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
300 buf->backend_map_handles[cur_page] =
303 buf->backend_map_handles[cur_page] =
307 dev_err(&buf->xb_dev->dev,
314 dev_err(&buf->xb_dev->dev,
316 backend_unmap(buf);
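backend_map walks the page directory the backend filled in, turning each gref slot into a map op against the local backing page, then maps everything in one gnttab_map_refs() call (line 295) and records the returned handles for the later unmap. A sketch of the walk; the clamping of to_copy and the cursor bookkeeping are inferred from the fragments at 267-293, and struct xen_page_directory (a next-page link plus a flexible gref[] array) is implied but never shown by the matches:

unsigned char *ptr = buf->directory;
int cur_dir_page, cur_page = 0, cur_gref;
int grefs_left = buf->num_pages;

for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
	struct xen_page_directory *page_dir =
		(struct xen_page_directory *)ptr;
	int to_copy = XEN_NUM_GREFS_PER_PAGE;

	if (to_copy > grefs_left)
		to_copy = grefs_left;

	for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
		uint64_t addr = xen_page_to_vaddr(buf->pages[cur_page]);

		gnttab_set_map_op(&map_ops[cur_page], addr, GNTMAP_host_map,
				  page_dir->gref[cur_gref],
				  buf->xb_dev->otherend_id);
		cur_page++;
	}

	grefs_left -= to_copy;
	ptr += PAGE_SIZE;
}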
330 * \param buf shared buffer.
332 static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
338 ptr = buf->directory;
339 num_pages_dir = get_num_pages_dir(buf);
345 page_dir->gref_dir_next_page = buf->grefs[i + 1];
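In backend-allocated mode only the directory itself is granted, so fill_page_dir merely chains the directory pages together through their first slot. A sketch; XEN_GREF_LIST_END is the list terminator the file defines outside the matched lines:

static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	struct xen_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* Chain the directory pages; only their grefs exist here. */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xen_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* The last page terminates the list. */
	page_dir = (struct xen_page_directory *)ptr;
	page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}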
357 * \param buf shared buffer.
359 static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
364 ptr = buf->directory;
365 num_pages_dir = get_num_pages_dir(buf);
372 grefs_left = buf->num_pages;
382 page_dir->gref_dir_next_page = buf->grefs[i + 1];
384 memcpy(&page_dir->gref, &buf->grefs[cur_gref],
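guest_fill_page_dir does the same chaining but additionally copies the data-page grefs into each directory page. Since the first num_pages_dir entries of buf->grefs belong to the directory itself (see grant_references below), the copy cursor starts past them. A sketch, with the clamping logic inferred from the fragments at 364-384:

unsigned char *ptr = buf->directory;
int num_pages_dir = get_num_pages_dir(buf);
int i, to_copy, cur_gref = num_pages_dir;	/* skip the dir grefs */
int grefs_left = buf->num_pages;

for (i = 0; i < num_pages_dir; i++) {
	struct xen_page_directory *page_dir =
		(struct xen_page_directory *)ptr;

	if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
		to_copy = grefs_left;
		page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
	} else {
		to_copy = XEN_NUM_GREFS_PER_PAGE;
		page_dir->gref_dir_next_page = buf->grefs[i + 1];
	}

	memcpy(&page_dir->gref, &buf->grefs[cur_gref],
	       to_copy * sizeof(grant_ref_t));

	ptr += PAGE_SIZE;
	grefs_left -= to_copy;
	cur_gref += to_copy;
}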
398 * \param buf shared buffer.
401 static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
407 otherend_id = buf->xb_dev->otherend_id;
408 for (i = 0; i < buf->num_pages; i++) {
414 xen_page_to_gfn(buf->pages[i]),
416 buf->grefs[gref_idx++] = cur_ref;
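For frontend-allocated buffers each data page gets its own grant: claim a reference from the pre-allocated pool, grant the other end access to the page's frame, and record the ref. A sketch of the full op, with the failure check on a claimed reference inferred:

static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0 /* read/write */);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}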
427 * \param buf shared buffer.
430 static int grant_references(struct xen_front_pgdir_shbuf *buf)
436 ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
438 dev_err(&buf->xb_dev->dev,
443 otherend_id = buf->xb_dev->otherend_id;
445 num_pages_dir = get_num_pages_dir(buf);
453 frame = xen_page_to_gfn(virt_to_page(buf->directory +
456 buf->grefs[j++] = cur_ref;
459 if (buf->ops->grant_refs_for_buffer) {
460 ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
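grant_references reserves num_grefs references in one batch (line 436), grants the directory pages, then delegates data-page granting to the mode-specific op; only the frontend-allocated ops set grant_refs_for_buffer, which is why line 459 checks for it. A sketch of the directory part; the XEN_PAGE_SIZE stride and the final release of the private pool are my reading of the standard grant-table helpers:

grant_ref_t priv_gref_head;
int ret, i, j = 0, cur_ref, otherend_id, num_pages_dir;
unsigned long frame;

ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
if (ret < 0)
	return ret;

otherend_id = buf->xb_dev->otherend_id;
num_pages_dir = get_num_pages_dir(buf);
for (i = 0; i < num_pages_dir; i++) {
	cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
	if (cur_ref < 0)
		return cur_ref;

	/* Directory pages are granted first, at buf->grefs[0..j). */
	frame = xen_page_to_gfn(virt_to_page(buf->directory +
					     XEN_PAGE_SIZE * i));
	gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
	buf->grefs[j++] = cur_ref;
}

if (buf->ops->grant_refs_for_buffer) {
	ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
	if (ret)
		return ret;
}

gnttab_free_grant_references(priv_gref_head);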
472 * \param buf shared buffer.
475 static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
477 buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
478 if (!buf->grefs)
481 buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
482 if (!buf->directory)
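alloc_storage is the only allocation site: one entry per gref plus whole pages for the directory. The listing elides the error returns; a sketch:

static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE,
				 GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}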
517 struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
521 buf->ops = &backend_ops;
523 buf->ops = &local_ops;
524 buf->xb_dev = cfg->xb_dev;
525 buf->num_pages = cfg->num_pages;
526 buf->pages = cfg->pages;
528 buf->ops->calc_num_grefs(buf);
530 ret = alloc_storage(buf);
534 ret = grant_references(buf);
538 buf->ops->fill_page_dir(buf);
543 xen_front_pgdir_shbuf_free(buf);
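The tail of the listing is the allocation entry point: pick the ops for the allocation mode (lines 521-523), compute num_grefs, allocate storage, grant references, fill the directory, and free everything on any failure (line 543). A hypothetical caller follows; the cfg fields match those read at 517-526, while the be_alloc flag comes from the public header and is not shown in this listing:

struct xen_front_pgdir_shbuf shbuf = {};
struct xen_front_pgdir_shbuf_cfg cfg = {
	.xb_dev    = xb_dev,	/* our xenbus device */
	.num_pages = num_pages,	/* pages backing the shared buffer */
	.pages     = pages,
	.be_alloc  = 0,		/* frontend-allocated buffer */
	.pgdir     = &shbuf,
};
int ret = xen_front_pgdir_shbuf_alloc(&cfg);

if (ret < 0)
	return ret;

/* A no-op for frontend-allocated buffers (see lines 95-98). */
ret = xen_front_pgdir_shbuf_map(&shbuf);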