Lines Matching refs:sgl

/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might overlap
 * other user-space memory blocks are cached for the later copy_to_user of
 * the data.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                          void __user *user_addr, size_t user_size, int write)

        sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
        sgl->fpage_size = min_t(size_t, PAGE_SIZE - sgl->fpage_offs, user_size);
        sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
        sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

                __func__, user_addr, user_size, sgl->nr_pages,
                sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

        sgl->user_addr = user_addr;
        sgl->user_size = user_size;
        sgl->write = write;
        sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

        if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) {
        sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
                                             &sgl->sgl_dma_addr);
        if (sgl->sgl == NULL) {

        if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
                sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->fpage_dma_addr);
                if (sgl->fpage == NULL)

                if (copy_from_user(sgl->fpage + sgl->fpage_offs,
                                   user_addr, sgl->fpage_size)) {

        if (sgl->lpage_size != 0) {
                sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
                                                       &sgl->lpage_dma_addr);
                if (sgl->lpage == NULL)

                if (copy_from_user(sgl->lpage, user_addr + user_size -
                                   sgl->lpage_size, sgl->lpage_size)) {

        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
                                 sgl->lpage_dma_addr);
        sgl->lpage = NULL;
        sgl->lpage_dma_addr = 0;

        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                 sgl->fpage_dma_addr);
        sgl->fpage = NULL;
        sgl->fpage_dma_addr = 0;

        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);
        sgl->sgl = NULL;
        sgl->sgl_dma_addr = 0;
        sgl->sgl_size = 0;
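
For illustration, the short user-space sketch below replays the first/last-page arithmetic from the lines above for one unaligned buffer. It is not driver code: SKETCH_PAGE_SIZE and the sample address and size are made up here, and the open-coded min/round-up stand in for the kernel's min_t() and DIV_ROUND_UP().

#include <stdio.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096UL         /* assumed page size for the example */

int main(void)
{
        unsigned long user_addr = 0x1000f00UL;  /* starts 0xf00 bytes into a page */
        size_t user_size = 10000;               /* bytes to transfer */

        size_t fpage_offs = user_addr % SKETCH_PAGE_SIZE;       /* offset_in_page() */
        size_t fpage_size = SKETCH_PAGE_SIZE - fpage_offs;      /* room left in first page */
        if (fpage_size > user_size)
                fpage_size = user_size;                          /* min_t() */
        size_t nr_pages = (fpage_offs + user_size + SKETCH_PAGE_SIZE - 1)
                          / SKETCH_PAGE_SIZE;                    /* DIV_ROUND_UP() */
        size_t lpage_size = (user_size - fpage_size) % SKETCH_PAGE_SIZE;

        /* Prints fpage_offs=3840 fpage_size=256 nr_pages=4 lpage_size=1552:
         * both the first and the last page are only partially covered, so
         * genwqe_alloc_sync_sgl() would cache both of them.
         */
        printf("fpage_offs=%zu fpage_size=%zu nr_pages=%zu lpage_size=%zu\n",
               fpage_offs, fpage_size, nr_pages, lpage_size);
        return 0;
}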
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,

        size_t size = sgl->user_size;

        map_offs = sgl->fpage_offs;     /* offset in first page */

        s = &sgl->sgl[0];               /* first set of 8 entries */
        while (p < sgl->nr_pages) {

                s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);

                        if ((p == 0) && (sgl->fpage != NULL)) {
                                daddr = sgl->fpage_dma_addr + map_offs;

                        } else if ((p == sgl->nr_pages - 1) &&
                                   (sgl->lpage != NULL)) {
                                daddr = sgl->lpage_dma_addr;

                                if (p == sgl->nr_pages)

                        if (p == sgl->nr_pages)

                s -= 8;                 /* full shift needed on previous sgl block */
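
The comments above ("first set of 8 entries", "full shift needed on previous sgl block") and the chaining write of sgl_dma_addr + dma_offs suggest that the sgl buffer is a chain of 8-entry blocks, with one entry per block pointing at the next block and the rest carrying data pages. The sketch below only illustrates that layout; the 16-byte entry size, the 128-byte block stride and the 7-data-entries-per-block split are assumptions, not taken from this listing.

#include <stdio.h>

#define ENTRIES_PER_BLOCK       8       /* "first set of 8 entries" */
#define DATA_ENTRIES_PER_BLOCK  (ENTRIES_PER_BLOCK - 1)  /* assumed: 1 chain entry per block */
#define ENTRY_SIZE              16UL    /* assumed sg_entry size in bytes */
#define BLOCK_STRIDE            (ENTRIES_PER_BLOCK * ENTRY_SIZE)

int main(void)
{
        unsigned long sgl_dma_addr = 0x80000000UL;      /* example DMA address of the sgl */
        unsigned long nr_pages = 20;                    /* example page count */

        /* one block per 7 data pages, rounded up */
        unsigned long nr_blocks =
                (nr_pages + DATA_ENTRIES_PER_BLOCK - 1) / DATA_ENTRIES_PER_BLOCK;

        for (unsigned long b = 0; b < nr_blocks; b++) {
                unsigned long dma_offs = (b + 1) * BLOCK_STRIDE;

                printf("block %lu at sgl+0x%lx, chain entry -> 0x%lx\n",
                       b, b * BLOCK_STRIDE, sgl_dma_addr + dma_offs);
        }
        return 0;
}

In the driver, the chain entry of the final block would presumably not be left pointing past the end; trimming the trailing entries happens in the part of genwqe_setup_sgl() that is not shown in this listing.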
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd:  genwqe device descriptor
 * @sgl: scatter gather list describing user-space memory
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)

        if (sgl->fpage) {
                if (sgl->write) {
                        res = copy_to_user(sgl->user_addr,
                                sgl->fpage + sgl->fpage_offs, sgl->fpage_size);

                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                         sgl->fpage_dma_addr);
                sgl->fpage = NULL;
                sgl->fpage_dma_addr = 0;

        if (sgl->lpage) {
                if (sgl->write) {
                        offset = sgl->user_size - sgl->lpage_size;
                        res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
                                           sgl->lpage_size);

                __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
                                         sgl->lpage_dma_addr);
                sgl->lpage = NULL;
                sgl->lpage_dma_addr = 0;

        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);

        sgl->sgl = NULL;
        sgl->sgl_dma_addr = 0x0;
        sgl->sgl_size = 0;
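
Read together, the three functions form an allocate → map → copy-back/free lifecycle around one user buffer. The following is only a hypothetical caller sketch built from the signatures visible above, assuming the driver's internal headers: the dma_list argument to genwqe_setup_sgl() and the DDCB submission step are assumptions about code outside this listing, and error unwinding is trimmed.

/* Hypothetical caller sketch, not part of the driver. */
static int sketch_transfer_user_buffer(struct genwqe_dev *cd,
                                       void __user *uaddr, size_t usize,
                                       int write, dma_addr_t *dma_list)
{
        struct genwqe_sgl sgl;
        int rc, rc2;

        memset(&sgl, 0, sizeof(sgl));

        /* 1. Size and allocate the sgl; partially covered first and last
         *    pages are cached and pre-filled from user space. */
        rc = genwqe_alloc_sync_sgl(cd, &sgl, uaddr, usize, write);
        if (rc)
                return rc;

        /* 2. Fill the chained sg entries from the per-page DMA addresses.
         *    dma_list is assumed to come from pinning and mapping the user
         *    pages, which is outside this listing. */
        rc = genwqe_setup_sgl(cd, &sgl, dma_list);

        /* 3. Hand sgl.sgl_dma_addr to the card and wait for the transfer
         *    (DDCB submission, not shown here). */

        /* 4. On write, copy the cached first/last pages back to user space,
         *    then free the sgl and the cached pages. */
        rc2 = genwqe_free_sync_sgl(cd, &sgl);

        return rc ? rc : rc2;
}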