/linux-master/net/rxrpc/
  rxkad.c
    188: struct scatterlist sg;    (local)
    216: sg_init_one(&sg, tmpbuf, tmpsize);
    219: skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
    257: struct scatterlist sg;    (local)
    278: sg_init_one(&sg, hdr, 8);
    281: skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
    300: struct scatterlist sg;    (local)
    325: sg_init_one(&sg, rxkhd…
    341: struct scatterlist sg;    (local)
    415: struct scatterlist sg[16];    (local)
    479: struct scatterlist _sg[4], *sg;    (local)
    557: struct scatterlist sg;    (local)
    766: struct scatterlist sg[1];    (local)
    869: struct scatterlist sg[1];    (local)
    993: struct scatterlist sg[1];    (local)
    [more matches not shown]
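The rxkad.c hits all follow one pattern: wrap a small, linear kernel buffer in a one-entry scatterlist and run an skcipher over it in place. A minimal sketch of that pattern, assuming a kmalloc'ed buffer and a sync skcipher handle supplied by the caller (the helper name is illustrative, not from rxkad.c):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Encrypt len bytes of a kmalloc'ed buffer in place with a sync skcipher. */
static int encrypt_in_place(struct crypto_sync_skcipher *tfm,
			    void *buf, unsigned int len, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;

	/* One scatterlist entry covering the whole (linear) buffer. */
	sg_init_one(&sg, buf, len);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	/* src == dst, so the transform runs in place. */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	return crypto_skcipher_encrypt(req);
}

Because source and destination name the same scatterlist, the cipher overwrites the buffer in place; the buffer has to be page-backed memory (e.g. kmalloc), not the stack, since the crypto layer reaches it through sg_page().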
/linux-master/net/core/
  skmsg.c
    15: if (msg->sg.end > msg->sg.start &&
    16:     elem_first_coalesce < msg->sg.end)
    19: if (msg->sg.end < msg->sg.start &&
    20:     (elem_first_coalesce > msg->sg.start ||
    21:      elem_first_coalesce < msg->sg.end))
    31: u32 osize = msg->sg.size;
    34: len -= msg->sg.size;
    52: i = msg->sg…
    [more matches not shown]
  skbuff.c
    4616: bool csum, sg;    (local)
    4647: sg = !!(features & NETIF_F_SG);
    4650: if (sg && csum && (mss != GSO_BY_FRAGS)) {
    4721: (skb_headlen(list_skb) == len || sg)) {
    4765: if (hsize > len || !sg)
    4797: if (!sg) {
    5041: __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, …    (argument)
    5055: sg_set_buf(sg, skb->data + offset, copy);
    5070: if (unlikely(elt && sg_is_last(&sg[elt - 1])))
    5075: sg_set_page(&sg[el…
    5124: skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)    (argument)
    5156: skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)    (argument)
    [more matches not shown]
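skb_to_sgvec() and its __skb_to_sgvec()/_nomark variants map an skb's linear data, page frags and frag_list into a caller-provided scatterlist, typically so the packet can be fed to crypto or DMA. A consumer-side sketch; the table size and wrapper name are assumptions, not from skbuff.c:

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* Up to one entry for the linear area plus one per page fragment. */
#define PAYLOAD_SG_MAX	(MAX_SKB_FRAGS + 1)

static int skb_payload_to_sg(struct sk_buff *skb, struct scatterlist *sg,
			     int offset, int len)
{
	int nents;

	sg_init_table(sg, PAYLOAD_SG_MAX);

	/*
	 * Fills sg[] from skb->data, the page frags and any frag_list, and
	 * returns the number of entries used, or a negative errno
	 * (e.g. -EMSGSIZE if the table is too small).
	 */
	nents = skb_to_sgvec(skb, sg, offset, len);
	if (nents < 0)
		return nents;

	/* sg[0..nents-1] now describes len bytes starting at offset. */
	return nents;
}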
  filter.c
    2610: u32 i = msg->sg.start;
    2616: if (len >= msg->sg.size)
    2618: } while (i != msg->sg.end);
    2620: msg->sg.curr = i;
    2621: msg->sg.copybreak = 0;
    2645: i = msg->sg.start;
    2652: } while (i != msg->sg.end);
    2658: /* The start may point into the sg element so we need to also
    2662: if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len)
    2673: * will copy the entire sg entr…
    [more matches not shown]
/linux-master/lib/
  scatterlist.c
    17: * @sg: The current sg entry
    20: * Usually the next entry will be @sg@ + 1, but if this sg element is part
    25: struct scatterlist *sg_next(struct scatterlist *sg)    (argument)
    27: if (sg_is_last(sg))
    30: sg++;
    31: if (unlikely(sg_is_chain(sg)))
    32: sg = sg_chain_ptr(sg);
    47: sg_nents(struct scatterlist *sg)    (argument)
    70: sg_nents_for_len(struct scatterlist *sg, u64 len)    (argument)
    105: struct scatterlist *sg, *ret = NULL;    (local)
    140: sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)    (argument)
    171: sg_kfree(struct scatterlist *sg, unsigned int nents)    (argument)
    290: struct scatterlist *sg, *prv;    (local)
    615: struct scatterlist *sgl, *sg;    (local)
    687: struct scatterlist *sg;    (local)
    737: sg_page_count(struct scatterlist *sg)    (argument)
    761: sg_dma_page_count(struct scatterlist *sg)    (argument)
    814: struct scatterlist *sg;    (local)
    1113: struct scatterlist *sg = sgtable->sgl + sgtable->nents;    (local)
    1168: struct scatterlist *sg = sgtable->sgl + sgtable->nents;    (local)
    1214: struct scatterlist *sg = sgtable->sgl + sgtable->nents;    (local)
    1274: struct scatterlist *sg = sgtable->sgl + sgtable->nents;    (local)
    [more matches not shown]
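sg_next() is what lets callers treat a chained scatterlist as a flat list: it steps over chain entries transparently and returns NULL after the last real entry. A small sketch of the traversal idiom this enables (the helper name is illustrative):

#include <linux/scatterlist.h>

/* Sum the byte lengths of every real entry in a (possibly chained) list. */
static unsigned int sgl_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;	/* chain entries are hidden by sg_next() */

	return total;
}

sg_nents() and sg_nents_for_len() in scatterlist.c are essentially this loop, counting entries instead of (or as well as) bytes.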
/linux-master/include/linux/
  skmsg.h
    44: struct sk_msg_sg sg;    (member of struct sk_msg)
    142: WARN_ON(i == msg->sg.end && bytes);
    176: sk_msg_iter_var_prev(msg->sg.which)
    179: sk_msg_iter_var_next(msg->sg.which)
    183: BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
    185: sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
    191: dst->sg.data[which] = src->sg.data[which];
    192: dst->sg.data[which].length = size;
    193: dst->sg…
    [more matches not shown]
/linux-master/drivers/spi/
  spi.c
    1135: struct scatterlist *sg;    (local)
    1154: sg = &sgt->sgl[0];
    1174: sg_set_page(sg, vm_page,
    1179: sg_set_buf(sg, sg_buf, min);
    1184: sg = sg_next(sg);
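The spi.c hits show the two ways an entry gets filled: sg_set_page() for memory that is only virtually contiguous (vmalloc, one page per entry) and sg_set_buf() for physically contiguous kmalloc memory. A simplified sketch of that decision, assuming the caller passes either a kmalloc or a vmalloc buffer (the function name and chunking policy are illustrative, not spi.c's):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static int fill_sgt_from_buf(struct sg_table *sgt, void *buf, size_t len)
{
	bool vmalloced = is_vmalloc_addr(buf);
	unsigned int nents = vmalloced ?
		DIV_ROUND_UP(offset_in_page(buf) + len, PAGE_SIZE) : 1;
	struct scatterlist *sg;
	int ret;

	if (!len)
		return -EINVAL;

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		return ret;

	sg = sgt->sgl;
	if (!vmalloced) {
		/* kmalloc memory is physically contiguous: one entry. */
		sg_set_buf(sg, buf, len);
		return 0;
	}

	while (len) {
		/* vmalloc memory is only virtually contiguous, so each page
		 * gets its own entry, looked up via vmalloc_to_page().
		 */
		size_t chunk = min_t(size_t, len,
				     PAGE_SIZE - offset_in_page(buf));

		sg_set_page(sg, vmalloc_to_page(buf), chunk,
			    offset_in_page(buf));
		buf += chunk;
		len -= chunk;
		sg = sg_next(sg);
	}
	return 0;
}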
/linux-master/drivers/nvme/target/
  tcp.c
    301: struct scatterlist sg;    (local)
    303: sg_init_one(&sg, pdu, len);
    304: ahash_request_set_crypt(hash, &sg, pdu + len, len);
    355: sgl_free(cmd->req.sg);
    357: cmd->req.sg = NULL;
    363: struct scatterlist *sg;    (local)
    372: sg = &cmd->req.sg[cmd->sg_idx];
    375: u32 iov_len = min_t(u32, length, sg->length - sg_offset);
    377: bvec_set_page(iov, sg_page(sg), iov_le…
    [more matches not shown]
  nvmet.h
    368: struct scatterlist *sg;    (member of struct nvmet_req)
  core.c
    98: if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
    107: if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
    116: if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
    945: req->sg = NULL;
    1043: req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
    1045: if (!req->sg)
    1059: pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
    1079: req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
    1081: if (unlikely(!req->sg))
    1093: sgl_free(req->sg);
    [more matches not shown]
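sg_pcopy_from_buffer(), sg_pcopy_to_buffer() and sg_zero_buffer() operate on an SGL starting at a byte offset and return the number of bytes actually processed, which the nvmet core checks against the requested length. The naming is slightly counter-intuitive: *_from_buffer copies buffer -> SGL, *_to_buffer copies SGL -> buffer. A short sketch of that check (the wrapper names are illustrative):

#include <linux/scatterlist.h>
#include <linux/errno.h>

static int copy_to_sgl(struct scatterlist *sgl, unsigned int nents,
		       const void *buf, size_t len, off_t off)
{
	/* Short copy means the SGL holds fewer than off + len bytes. */
	if (sg_pcopy_from_buffer(sgl, nents, buf, len, off) != len)
		return -EFAULT;
	return 0;
}

static int copy_from_sgl(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t len, off_t off)
{
	if (sg_pcopy_to_buffer(sgl, nents, buf, len, off) != len)
		return -EFAULT;
	return 0;
}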
/linux-master/drivers/nvme/host/
  tcp.c
    439: struct scatterlist sg;    (local)
    441: sg_init_table(&sg, 1);
    442: sg_set_page(&sg, page, len, off);
    443: ahash_request_set_crypt(hash, &sg, NULL, len);
    450: struct scatterlist sg;    (local)
    452: sg_init_one(&sg, pdu, len);
    453: ahash_request_set_crypt(hash, &sg, pdu + len, len);
    2372: struct nvme_sgl_desc *sg = &c->common.dptr.sgl;    (local)
    2374: sg->addr = 0;
    2375: sg…
    2383: struct nvme_sgl_desc *sg = &c->common.dptr.sgl;    (local)
    2393: struct nvme_sgl_desc *sg = &c->common.dptr.sgl;    (local)
    [more matches not shown]
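The digest hits here have the same shape as the rxkad example, but use the ahash API: a one-entry scatterlist over a page or PDU buffer is handed to ahash_request_set_crypt() and digested in one call. A sketch assuming a synchronous transform such as crc32c (the function name and output type are illustrative, not nvme-tcp's):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int digest_page(struct crypto_ahash *tfm, struct page *page,
		       unsigned int off, unsigned int len, u8 *out)
{
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* One entry pointing at len bytes inside the page. */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, len, off);

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* init + update + final in one call; a sync tfm returns directly. */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	return ret;
}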
  pci.c
    42: * require an sg allocation that needs more than a page of data.
    227: * The sg pointer contains the list of PRP/SGL chunk allocations in addition
    568: struct scatterlist *sg;    (local)
    570: for_each_sg(sgl, sg, nents, i) {
    571: dma_addr_t phys = sg_phys(sg);
    572: pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
    574: i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
    575: sg_dma_len(sg));
    585: struct scatterlist *sg = iod->sgt.sgl;    (local)
    669: nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, struct scatterlist *sg)    (argument)
    691: struct scatterlist *sg = iod->sgt.sgl;    (local)
    [more matches not shown]
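The pci.c hits around line 570 are a diagnostic walk of a mapped scatterlist: for_each_sg() visits nents entries and prints both the CPU-side view (sg_phys(), offset, length) and the DMA-side view (sg_dma_address(), sg_dma_len()). A self-contained sketch of that loop:

#include <linux/scatterlist.h>
#include <linux/printk.h>

static void dump_mapped_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);

		/* CPU view (phys/offset/length) vs. DMA view (addr/len). */
		pr_info("sg[%d] phys:%pad off:%u len:%u dma:%pad dma_len:%u\n",
			i, &phys, sg->offset, sg->length,
			&sg_dma_address(sg), sg_dma_len(sg));
	}
}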
/linux-master/drivers/gpu/drm/xe/
  xe_vm.c
    93: if (userptr->sg) {
    95: userptr->sg,
    98: sg_free_table(userptr->sg);
    99: userptr->sg = NULL;
    138: userptr->sg = NULL;
    141: userptr->sg = &userptr->sgt;
    143: ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
    149: sg_free_table(userptr->sg);
    150: userptr->sg = NULL;
    965: if (userptr->sg) {
    3331: struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;    (local)
    [more matches not shown]
/linux-master/drivers/gpu/drm/vmwgfx/
  vmwgfx_bo.c
    400: params->sg, params->resv, destroy);
/linux-master/drivers/gpu/drm/ttm/
  ttm_tt.c
    158: ttm->sg = bo->sg;
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/
  r535.c
    1965: struct scatterlist *sg;    (local)
    1994: for_each_sgtable_sg(&rx3->lvl2, sg, i) {
    1997: pte = sg_virt(sg);
    1998: sgl_end = (void *)pte + sg->length;
/linux-master/drivers/gpu/drm/amd/amdgpu/
  amdgpu_ttm.c
    768: * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
    783: r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
    790: r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
    795: drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
    801: kfree(ttm->sg);
    802: ttm->sg = NULL;
    819: if (!ttm->sg || !ttm->sg->sgl)
    823: dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
    824: sg_free_table(ttm->sg);
    [more matches not shown]
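amdgpu_ttm_tt_pin_userptr() shows the standard userptr lifecycle: build an sg_table over an array of pinned pages with sg_alloc_table_from_pages(), map it with dma_map_sgtable(), and undo both on teardown. A sketch of the same lifecycle outside amdgpu; the function names are illustrative and error handling is reduced to the essentials:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int map_user_pages(struct device *dev, struct page **pages,
			  unsigned int npages, struct sg_table *sgt)
{
	int ret;

	/* One table over all pages; physically adjacent pages are merged
	 * into fewer, larger entries where possible.
	 */
	ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
					(unsigned long)npages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* Hand the whole table to the IOMMU/DMA layer in one call. */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		sg_free_table(sgt);
	return ret;
}

static void unmap_user_pages(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(sgt);
}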
  amdgpu_object.c
    63: drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
  amdgpu_amdkfd_gpuvm.c
    528: struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);    (local)
    530: if (!sg)
    532: if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
    533: kfree(sg);
    536: sg_dma_address(sg->sgl) = addr;
    537: sg->sgl->length = size;
    539: sg->sgl->dma_length = size;
    541: return sg;
    561: ttm->sg…
    1691: struct sg_table *sg = NULL;    (local)
    [more matches not shown]
/linux-master/arch/s390/mm/
  gmap.c
    254: struct gmap *sg, *next;    (local)
    260: list_for_each_entry_safe(sg, next, &gmap->children, list) {
    261: list_del(&sg->list);
    262: gmap_put(sg);
    975: * Expected to be called with sg->mm->mmap_lock in read and
    1021: * Expected to be called with sg->mm->mmap_lock in read
    1057: * Called with sg->mm->mmap_lock in read.
    1190: * @sg: pointer to the shadow guest address space structure
    1194: * Called with the sg->guest_table_lock
    1196: static inline void gmap_insert_rmap(struct gmap *sg, unsigne…    (argument)
    1231: gmap_protect_rmap(struct gmap *sg, unsigned long raddr, unsigned long paddr, unsigned long len)    (argument)
    1312: gmap_unshadow_page(struct gmap *sg, unsigned long raddr)    (argument)
    1332: __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr, unsigned long *pgt)    (argument)
    1349: gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)    (argument)
    1379: __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr, unsigned long *sgt)    (argument)
    1407: gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)    (argument)
    1437: __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr, unsigned long *r3t)    (argument)
    1465: gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)    (argument)
    1495: __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr, unsigned long *r2t)    (argument)
    1523: gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)    (argument)
    1553: __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr, unsigned long *r1t)    (argument)
    1584: gmap_unshadow(struct gmap *sg)    (argument)
    1624: struct gmap *sg;    (local)
    1650: gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)    (argument)
    1676: struct gmap *sg, *new;    (local)
    1766: gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, int fake)    (argument)
    1850: gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, int fake)    (argument)
    1934: gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, int fake)    (argument)
    2018: gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake)    (argument)
    2058: gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake)    (argument)
    2137: gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)    (argument)
    2205: gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr, unsigned long gaddr)    (argument)
    2272: struct gmap *gmap, *sg, *next;    (local)
    [more matches not shown]
/linux-master/kernel/sched/
  fair.c
    7394: struct sched_group *sg = sd->groups;    (local)
    7396: if (sg->flags & SD_CLUSTER) {
    7397: for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) {
    7413: cpumask_andnot(cpus, cpus, sched_group_span(sg));
    9975: * @sg: sched_group candidate to be checked for being the busiest
    9978: * Determine if @sg is a busier group than the previously selected
    9981: * Return: %true if @sg is a busier group than the previously selected
    9986: struct sched_group *sg,
    10003: (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
    10032: return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg…
    9984: update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg, struct sg_lb_stats *sgs)    (argument)
    10563: struct sched_group *sg = env->sd->groups;    (local)
    11192: struct sched_group *sg = env->sd->groups;    (local)
    [more matches not shown]
/linux-master/drivers/dma/xilinx/
  xdma.c
    614: struct scatterlist *sg;    (local)
    616: for_each_sg(sgl, sg, sg_len, i)
    617: desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
    637: for_each_sg(sgl, sg, sg_len, i) {
    638: addr = sg_dma_address(sg);
    639: desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
    640: dev_addr += sg_dma_len(sg);
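The first xdma.c loop is a sizing pass: with a per-descriptor transfer limit, the driver walks the mapped scatterlist once to learn how many hardware descriptors it must allocate before a second pass fills them in. A sketch of that counting pass (MAX_BLEN and the helper name are illustrative, not XDMA's actual limit):

#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

#define MAX_BLEN	SZ_1M	/* example per-descriptor limit */

static unsigned int count_hw_descs(struct scatterlist *sgl, unsigned int sg_len)
{
	struct scatterlist *sg;
	unsigned int descs = 0;
	int i;

	/* sg_dma_len() is only valid after the list has been DMA-mapped. */
	for_each_sg(sgl, sg, sg_len, i)
		descs += DIV_ROUND_UP(sg_dma_len(sg), MAX_BLEN);

	return descs;
}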
/linux-master/drivers/dma/
  tegra186-gpc-dma.c
    480: /* Reset the sg index for cyclic transfers */
    1006: struct scatterlist *sg;    (local)
    1071: for_each_sg(sgl, sg, sg_len, i) {
    1075: mem = sg_dma_address(sg);
    1076: len = sg_dma_len(sg);
  pl330.c
    2853: struct scatterlist *sg;    (local)
    2866: for_each_sg(sgl, sg, sg_len, i) {
    2888: fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
    2889: sg_dma_len(sg));
    2893: fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
    2894: sg_dma_len(sg));
    2900: desc->bytes_requested = sg_dma_len(sg);
  owl-dma.c
    927: struct scatterlist *sg;    (local)
    938: for_each_sg(sgl, sg, sg_len, i) {
    939: addr = sg_dma_address(sg);
    940: len = sg_dma_len(sg);
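pl330.c, owl-dma.c and tegra186-gpc-dma.c all implement the provider side of device_prep_slave_sg(): one hardware descriptor per mapped entry, with the device FIFO on one side and sg_dma_address()/sg_dma_len() on the other, the transfer direction deciding which is source. For contrast, a sketch of the client side that feeds such a provider; channel setup and completion handling are elided, and the function name is illustrative:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int submit_mem_to_dev(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int mapped;

	/* dma_map_sg() takes the dma_data_direction enum ... */
	mapped = dma_map_sg(chan->device->dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... while the dmaengine prep call takes dma_transfer_direction.
	 * The provider walks the mapped entries with for_each_sg() and
	 * builds one hardware descriptor per entry.
	 */
	desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		goto unmap;

	dma_async_issue_pending(chan);
	return 0;

unmap:
	dma_unmap_sg(chan->device->dev, sgl, nents, DMA_TO_DEVICE);
	return -EIO;
}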