/linux-master/drivers/xen/ |
H A D | grant-dma-ops.c |
    234 static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, argument
    244 for_each_sg(sg, s, nents, i)
    249 static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, argument
    259 for_each_sg(sg, s, nents, i) {
    271 xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
    272 sg_dma_len(sg) = 0;
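
The grant-dma hits above show the usual .map_sg error-unwind shape: when mapping entry i fails, only the i entries mapped so far are unmapped (with DMA_ATTR_SKIP_CPU_SYNC) and sg_dma_len() of the head entry is zeroed. A minimal sketch of that shape, assuming hypothetical per-entry helpers my_map_one()/my_unmap_one() in place of the real grant-reference logic:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical per-entry helpers; the real driver maps via grant references. */
static dma_addr_t my_map_one(struct device *dev, struct scatterlist *s,
                             enum dma_data_direction dir);
static void my_unmap_one(struct device *dev, struct scatterlist *s,
                         enum dma_data_direction dir);

static void my_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                        enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                my_unmap_one(dev, s, dir);
}

static int my_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                     enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = my_map_one(dev, s, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto out_unmap;
                sg_dma_len(s) = s->length;
        }
        return nents;

out_unmap:
        /* Unwind only the i entries mapped so far. */
        my_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        sg_dma_len(sg) = 0;
        return -EIO;
}

Returning the segment count on success and a negative errno on failure follows the current dma_map_ops convention for .map_sg.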
|
/linux-master/crypto/ |
H A D | gcm.c |
    69 struct scatterlist sg; member in struct:crypto_gcm_req_priv_ctx
    79 struct scatterlist sg; member in struct:__anon131
    104 struct scatterlist sg[1]; member in struct:__anon132
    122 sg_init_one(data->sg, &data->hash, sizeof(data->hash));
    128 skcipher_request_set_crypt(&data->req, data->sg, data->sg,
    156 struct scatterlist *sg; local
    164 sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
    165 if (sg != pctx->src + 1)
    166 sg_chain(pctx->src, 2, sg);
    725 struct scatterlist *sg; local [all...]
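
Two scatterlist idioms recur in these gcm.c hits: sg_init_one() wraps a single kernel buffer as a one-entry list, and a two-slot array whose second slot is turned into a chain link lets a local entry be prepended to a caller's list without copying. A minimal sketch of the chaining part (not the gcm code itself), assuming hdr is a kmalloc'd buffer:

#include <linux/scatterlist.h>

/*
 * Prepend a driver-local header buffer to an existing payload scatterlist
 * by chaining, so consumers see one logical list.
 */
static void chain_header(struct scatterlist head[2], void *hdr,
                         unsigned int hdr_len, struct scatterlist *payload)
{
        sg_init_table(head, 2);
        sg_set_buf(&head[0], hdr, hdr_len);
        /* The second slot becomes a chain link, not a data entry. */
        sg_chain(head, 2, payload);
}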
/linux-master/net/ipv4/ |
H A D | tcp_sigpool.c |
    326 struct scatterlist sg; local
    329 sg_init_table(&sg, 1);
    331 sg_set_buf(&sg, ((u8 *)tp) + header_len, head_data_len);
    332 ahash_request_set_crypt(req, &sg, NULL, head_data_len);
    342 sg_set_page(&sg, page, skb_frag_size(f), offset_in_page(offset));
    343 ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
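
tcp_sigpool feeds both a linear buffer and an skb page fragment to the same hash by re-pointing a single stack scatterlist entry between crypto_ahash_update() calls. A sketch of that pattern, assuming req already has its tfm and callback configured, and hdr lives in the linear map (kmalloc'd memory, not stack or vmalloc, since sg_set_buf() uses virt_to_page()):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int hash_two_pieces(struct ahash_request *req,
                           const void *hdr, unsigned int hdr_len,
                           struct page *page, unsigned int off,
                           unsigned int len)
{
        struct scatterlist sg;
        int err;

        sg_init_table(&sg, 1);

        /* First piece: a linear buffer. */
        sg_set_buf(&sg, hdr, hdr_len);
        ahash_request_set_crypt(req, &sg, NULL, hdr_len);
        err = crypto_ahash_update(req);
        if (err)
                return err;

        /* Second piece: a page range, reusing the same sg entry. */
        sg_set_page(&sg, page, len, off);
        ahash_request_set_crypt(req, &sg, NULL, len);
        return crypto_ahash_update(req);
}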
|
/linux-master/include/linux/ |
H A D | kmsan.h |
    200 * @sg: scatterlist holding DMA buffers.
    209 void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
    319 static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, argument
|
H A D | dma-map-ops.h |
    60 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
    62 void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
    75 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
    77 void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
    417 bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
    419 bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
    503 struct scatterlist *sg);
    507 struct scatterlist *sg)
    506 pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, struct scatterlist *sg) argument
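
These are the scatterlist hooks a DMA backend fills in. A skeleton only, wiring in the hypothetical my_map_sg()/my_unmap_sg() sketched for grant-dma-ops.c above; a real backend would also supply the sync_sg_for_cpu/sync_sg_for_device callbacks and typically installs the ops on a device with set_dma_ops():

#include <linux/dma-map-ops.h>

static const struct dma_map_ops my_dma_ops = {
        .map_sg         = my_map_sg,    /* returns mapped nents or -errno */
        .unmap_sg       = my_unmap_sg,
};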
|
/linux-master/drivers/infiniband/hw/usnic/ |
H A D | usnic_uiom.c |
    69 struct scatterlist *sg; local
    74 for_each_sg(chunk->page_list, sg, chunk->nents, i) {
    75 page = sg_page(sg);
    76 pa = sg_phys(sg);
    90 struct scatterlist *sg; local
    162 for_each_sg(chunk->page_list, sg, chunk->nents, i) {
    163 sg_set_page(sg, page_list[i + off],
    165 pa = sg_phys(sg);
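
The usnic hits show both directions of working with a populated chunk: for_each_sg() plus sg_page()/sg_phys() to read entries back, and sg_set_page() to fill them. A small sketch of the read-back side:

#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Walk an already-populated scatterlist and inspect each entry. */
static void dump_sg_pages(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                struct page *page = sg_page(sg);   /* backing page */
                phys_addr_t pa = sg_phys(sg);      /* page address + sg->offset */

                pr_info("sg[%d]: page %p phys %pa len %u\n",
                        i, page, &pa, sg->length);
        }
}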
|
/linux-master/arch/powerpc/platforms/512x/ |
H A D | mpc512x_lpbfifo.c |
    166 struct scatterlist sg; local
    248 sg_init_table(&sg, 1);
    250 sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
    252 if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
    255 lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */
    257 sg_dma_len(&sg) = lpbfifo.req->size;
    259 dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
    327 dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
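
mpc512x_lpbfifo drives a dmaengine channel with a single-entry scatterlist built by hand: dma_map_single() supplies the bus address, sg_dma_address()/sg_dma_len() are filled directly, and the entry goes to dmaengine_prep_slave_sg(). A sketch of the same flow, assuming chan was already set up with dmaengine_slave_config() and buf is DMA-able (kmalloc'd):

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int start_one_buf_dma(struct dma_chan *chan, void *buf, size_t size,
                             enum dma_transfer_direction dir)
{
        struct device *dev = chan->device->dev;
        struct dma_async_tx_descriptor *desc;
        struct scatterlist sg;
        enum dma_data_direction map_dir =
                dir == DMA_MEM_TO_DEV ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        sg_init_table(&sg, 1);
        sg_dma_address(&sg) = dma_map_single(dev, buf, size, map_dir);
        if (dma_mapping_error(dev, sg_dma_address(&sg)))
                return -EFAULT;
        sg_dma_len(&sg) = size;

        desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT);
        if (!desc) {
                dma_unmap_single(dev, sg_dma_address(&sg), size, map_dir);
                return -ENXIO;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        /* Completion path must dma_unmap_single() the buffer, as at hit 327. */
        return 0;
}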
|
/linux-master/sound/soc/sprd/ |
H A D | sprd-pcm-dma.c |
    199 struct scatterlist *sg; local
    221 sg = devm_kcalloc(component->dev, sg_num, sizeof(*sg), GFP_KERNEL);
    222 if (!sg) {
    233 struct scatterlist *sgt = sg;
    279 data->desc = chan->device->device_prep_slave_sg(chan, sg,
    283 dev_err(component->dev, "failed to prepare slave sg\n");
    294 devm_kfree(component->dev, sg);
    299 devm_kfree(component->dev, sg);
|
/linux-master/drivers/dma/qcom/ |
H A D | qcom_adm.c |
    224 * @sg: Scatterlist entry
    230 struct scatterlist *sg, u32 crci,
    236 u32 remainder = sg_dma_len(sg);
    238 u32 mem_addr = sg_dma_address(sg);
    280 if (sg_is_last(sg))
    283 if (box_desc && sg_is_last(sg))
    295 * @sg: Scatterlist entry
    299 struct scatterlist *sg,
    303 u32 remainder = sg_dma_len(sg);
    304 u32 mem_addr = sg_dma_address(sg);
    229 adm_process_fc_descriptors(struct adm_chan *achan, void *desc, struct scatterlist *sg, u32 crci, u32 burst, enum dma_transfer_direction direction) argument
    298 adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc, struct scatterlist *sg, enum dma_transfer_direction direction) argument
    356 struct scatterlist *sg; local [all...]
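
The ADM code consumes already-mapped entries: sg_dma_len()/sg_dma_address() give the device view, each entry is chopped into hardware descriptors no larger than the controller allows, and sg_is_last() decides when to set end-of-transfer flags. A reduced sketch of the splitting loop, with MY_MAX_XFER and emit_desc() as hypothetical stand-ins for the ADM limits and descriptor writer:

#include <linux/minmax.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#define MY_MAX_XFER     SZ_64K          /* hypothetical per-descriptor limit */

static void emit_desc(u32 addr, u32 len); /* hypothetical descriptor writer */

static void process_one_entry(struct scatterlist *sg)
{
        u32 remainder = sg_dma_len(sg);
        u32 mem_addr = sg_dma_address(sg);

        while (remainder) {
                u32 chunk = min_t(u32, remainder, MY_MAX_XFER);

                emit_desc(mem_addr, chunk);
                mem_addr += chunk;
                remainder -= chunk;
        }
}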
/linux-master/drivers/nvme/host/ |
H A D | apple.c |
    80 * require an sg allocation that needs more than a page of data.
    155 * The sg pointer contains the list of PRP chunk allocations in addition
    166 struct scatterlist *sg; member in struct:apple_nvme_iod
    339 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
    370 dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
    376 mempool_free(iod->sg, anv->iod_mempool);
    382 struct scatterlist *sg; local
    384 for_each_sg(sgl, sg, nents, i) {
    385 dma_addr_t phys = sg_phys(sg);
    387 pr_warn("sg[
    400 struct scatterlist *sg = iod->sg; local [all...]
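
On completion the Apple NVMe driver tears the mapping down in two steps: dma_unmap_sg() with the direction derived from the request via rq_dma_dir(), then returning the scatterlist memory to the driver's mempool. A sketch with a simplified iod structure (not the driver's struct apple_nvme_iod):

#include <linux/blk-mq.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/scatterlist.h>

struct my_iod {
        struct scatterlist *sg;         /* allocated from @pool at submit time */
        int nents;                      /* entries passed to dma_map_sg() */
};

static void my_unmap_data(struct device *dev, mempool_t *pool,
                          struct request *req, struct my_iod *iod)
{
        dma_unmap_sg(dev, iod->sg, iod->nents, rq_dma_dir(req));
        mempool_free(iod->sg, pool);
}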
/linux-master/net/rds/ |
H A D | rdma.c |
    177 struct scatterlist *sg = NULL; local
    257 * pointers to the mr's sg array. We check to see if we've mapped
    259 * to the sg array so that we can have one page ref cleanup path.
    272 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
    273 if (!sg) {
    278 sg_init_table(sg, nents);
    282 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
    291 sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
    301 kfree(sg);
    768 struct scatterlist *sg; local [all...]
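
rds builds its MR scatterlist over pages it has already pinned: allocate the array, sg_init_table() to clear it and mark the end, then one full-page entry per pinned page. A sketch of that allocation pattern:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Build a scatterlist over already-pinned pages, one full page per entry. */
static struct scatterlist *pages_to_sg(struct page **pages, unsigned int nents)
{
        struct scatterlist *sg;
        unsigned int i;

        sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return NULL;

        sg_init_table(sg, nents);       /* clears entries and marks the end */
        for (i = 0; i < nents; i++)
                sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

        return sg;      /* caller kfree()s it when done */
}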
/linux-master/drivers/scsi/aacraid/ |
H A D | aachba.c |
    1265 ret = aac_build_sgraw(cmd, &readcmd->sg);
    1270 ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
    1301 ret = aac_build_sg64(cmd, &readcmd->sg);
    1305 ((le32_to_cpu(readcmd->sg.count) - 1) *
    1336 ret = aac_build_sg(cmd, &readcmd->sg);
    1340 ((le32_to_cpu(readcmd->sg.count) - 1) *
    1399 ret = aac_build_sgraw(cmd, &writecmd->sg);
    1404 ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
    1435 ret = aac_build_sg64(cmd, &writecmd->sg);
    1439 ((le32_to_cpu(writecmd->sg
    3798 struct scatterlist *sg; local
    3839 struct scatterlist *sg; local
    3881 struct scatterlist *sg; local
    3930 struct scatterlist *sg; local
    4048 struct scatterlist *sg; local [all...]
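
aac_build_sg/sg64/sgraw all follow the same outline: DMA-map the SCSI command's data buffer, then translate each mapped segment into the firmware's sg entry format and record the count. A condensed sketch using the SCSI midlayer helpers, with struct my_fw_sge standing in for the adapter's sgentry layouts:

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

struct my_fw_sge {
        __le32 addr;
        __le32 count;
};

static int build_fw_sg(struct scsi_cmnd *cmd, struct my_fw_sge *fw, int max)
{
        struct scatterlist *sg;
        int nseg, i;

        nseg = scsi_dma_map(cmd);       /* maps scsi_sglist(cmd) for DMA */
        if (nseg < 0)
                return nseg;
        if (nseg > max) {
                scsi_dma_unmap(cmd);
                return -EINVAL;
        }

        scsi_for_each_sg(cmd, sg, nseg, i) {
                fw[i].addr = cpu_to_le32(sg_dma_address(sg));
                fw[i].count = cpu_to_le32(sg_dma_len(sg));
        }
        return nseg;
}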
/linux-master/drivers/crypto/ccp/ |
H A D | ccp-crypto-sha.c |
    64 struct scatterlist *sg; local
    97 sg = NULL;
    110 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
    111 if (!sg) {
    115 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
    116 if (!sg) {
    120 sg_mark_end(sg);
    122 sg = rctx->data_sg.sgl;
    126 sg = &rctx->buf_sg;
    128 sg [all...]
/linux-master/drivers/gpu/drm/i915/gvt/ |
H A D | dmabuf.c |
    50 struct scatterlist *sg; local
    79 for_each_sg(st->sgl, sg, page_num, i) {
    87 sg->offset = 0;
    88 sg->length = PAGE_SIZE;
    89 sg_dma_len(sg) = PAGE_SIZE;
    90 sg_dma_address(sg) = dma_addr;
    98 for_each_sg(st->sgl, sg, i, j) {
    99 dma_addr = sg_dma_address(sg);
    114 struct scatterlist *sg; local
    122 for_each_sg(pages->sgl, sg, fb_inf [all...]
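
The GVT dmabuf code fills a pre-allocated sg_table (one entry per page) by writing offset/length and the DMA fields directly, because the addresses come from the GTT rather than dma_map_sg(); on error the partially filled prefix is walked with for_each_sg() up to i and undone. A sketch of the fill loop, assuming the table was created with sg_alloc_table(st, page_num, GFP_KERNEL) and my_get_dma_addr() is a hypothetical address lookup:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static dma_addr_t my_get_dma_addr(unsigned int idx);    /* hypothetical */

static int fill_table(struct sg_table *st, unsigned int page_num)
{
        struct scatterlist *sg;
        unsigned int i;

        for_each_sg(st->sgl, sg, page_num, i) {
                dma_addr_t dma_addr = my_get_dma_addr(i);

                if (dma_addr == DMA_MAPPING_ERROR)
                        return -ENOMEM;
                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_len(sg) = PAGE_SIZE;
                sg_dma_address(sg) = dma_addr;
        }
        return 0;
}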
/linux-master/drivers/gpu/drm/i915/gem/ |
H A D | i915_gem_shmem.c |
    70 struct scatterlist *sg; local
    99 sg = st->sgl;
    157 sg->length >= max_segment ||
    160 sg = sg_next(sg);
    163 sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
    166 sg->length += nr_pages * PAGE_SIZE;
    174 if (sg) /* loop terminated early; short sg table */
    175 sg_mark_end(sg); [all...]
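
The shmem backend coalesces physically contiguous memory into as few sg entries as possible (bounded by max_segment), moving to the next entry with sg_next() only when it must, and terminating a short table with sg_mark_end(). A simplified sketch of the same idiom using pages instead of folios and ignoring the segment-size cap; it assumes st was allocated with sg_alloc_table(st, n_pages, GFP_KERNEL):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void build_coalesced(struct sg_table *st, struct page **pages,
                            unsigned long n_pages)
{
        struct scatterlist *sg = st->sgl;
        unsigned long i;

        sg->length = 0;
        for (i = 0; i < n_pages; i++) {
                if (sg->length &&
                    page_to_pfn(pages[i]) == page_to_pfn(pages[i - 1]) + 1) {
                        sg->length += PAGE_SIZE;        /* contiguous: grow entry */
                } else {
                        if (sg->length)
                                sg = sg_next(sg);       /* start a new entry */
                        sg_set_page(sg, pages[i], PAGE_SIZE, 0);
                }
        }
        sg_mark_end(sg);        /* terminate at the last entry actually used */
}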
/linux-master/drivers/mmc/host/ |
H A D | cavium.c |
    377 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
    397 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
    521 count = dma_map_sg(host->dev, data->sg, data->sg_len,
    533 (sg_dma_len(&data->sg[0]) / 8) - 1);
    535 addr = sg_dma_address(&data->sg[0]);
    541 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);
    549 * Queue complete sg list into the FIFO.
    554 struct scatterlist *sg; local
    558 count = dma_map_sg(host->dev, data->sg, data->sg_len,
    568 for_each_sg(data->sg, s [all...]
H A D | tifm_sd.c |
    168 struct scatterlist *sg = r_data->sg; local
    176 cnt = sg[host->sg_pos].length - host->block_pos;
    189 cnt = sg[host->sg_pos].length;
    191 off = sg[host->sg_pos].offset + host->block_pos;
    193 pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
    224 struct scatterlist *sg = r_data->sg; local
    232 cnt = sg[host->sg_pos].length - host->block_pos;
    238 cnt = sg[hos
    266 struct scatterlist *sg = NULL; local [all...]
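
tifm_sd does PIO by indexing sg entries and mapping their pages by hand (nth_page(), offsets, block positions). The generic helper for that job is the sg mapping iterator; the sketch below is the sg_miter idiom rather than the tifm_sd code, with read_fifo_word() as a hypothetical register accessor:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static u16 read_fifo_word(void);        /* hypothetical FIFO register read */

/* Drain @total bytes from a device FIFO into the memory described by @sgl. */
static void pio_read_into_sg(struct scatterlist *sgl, unsigned int nents,
                             size_t total)
{
        struct sg_mapping_iter miter;
        size_t done = 0, i;
        u16 *buf;

        /* SG_MITER_ATOMIC: no sleeping between sg_miter_start() and _stop(). */
        sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG | SG_MITER_ATOMIC);
        while (done < total && sg_miter_next(&miter)) {
                buf = miter.addr;       /* kmapped address of this chunk */
                for (i = 0; i < miter.length / 2 && done < total; i++, done += 2)
                        buf[i] = read_fifo_word();
        }
        sg_miter_stop(&miter);
}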
/linux-master/tools/virtio/ |
H A D | vringh_test.c |
    299 /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
    330 struct scatterlist sg[4]; local
    354 /* Nasty three-element sg list. */
    355 sg_init_table(sg, num_sg = 3);
    356 sg_set_buf(&sg[0], (void *)dbuf, 1);
    357 sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
    358 sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
    361 sg_init_table(sg, num_sg = 2);
    362 sg_set_buf(&sg[0], (void *)dbuf, 1);
    363 sg_set_buf(&sg[ [all...]
/linux-master/drivers/crypto/ccree/ |
H A D | cc_buffer_mgr.c |
    100 * @sg: SG list
    106 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, argument
    111 nents = sg_nents_for_len(sg, end);
    112 sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
    255 static int cc_map_sg(struct device *dev, struct scatterlist *sg, argument
    268 *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
    276 ret = dma_map_sg(dev, sg, *nents, direction);
    279 dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
    294 /* create sg for the current buffer */
    320 /* create sg fo
    658 struct scatterlist *sg; local
    685 struct scatterlist *sg; local [all...]
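
cc_copy_sg_portion() pairs sg_nents_for_len() with sg_copy_buffer() to pull a byte range out of an SG list. The same thing can be written with the generic sg_pcopy_to_buffer(), which takes the skip offset directly; a small sketch (not the ccree code):

#include <linux/scatterlist.h>

static int copy_sg_range(struct scatterlist *sg, void *dest,
                         size_t len, off_t skip)
{
        /* Number of entries needed to cover the requested range. */
        int nents = sg_nents_for_len(sg, skip + len);

        if (nents < 0)
                return nents;   /* list is shorter than skip + len */
        sg_pcopy_to_buffer(sg, nents, dest, len, skip);
        return 0;
}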
H A D | cc_buffer_mgr.h |
    29 unsigned int nents; //sg nents
    67 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
|
/linux-master/drivers/dma/ |
H A D | imx-dma.c |
    136 /* For slave sg and cyclic */
    137 struct scatterlist *sg; member in struct:imxdma_desc
    265 struct scatterlist *sg = d->sg; local
    268 now = min_t(size_t, d->len, sg_dma_len(sg));
    273 imx_dmav1_writel(imxdma, sg->dma_address,
    276 imx_dmav1_writel(imxdma, sg->dma_address,
    306 d->sg && imxdma_hw_chain(imxdmac)) {
    307 d->sg = sg_next(d->sg);
    800 struct scatterlist *sg; local [all...]
/linux-master/drivers/mailbox/ |
H A D | bcm-pdc-mailbox.c |
    263 * index. Retained in order to unmap each sg after reply is processed.
    392 * given ring index. Retained in order to unmap each sg after reply
    697 * @sg: Scatterlist whose buffers contain part of the SPU request
    705 static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) argument
    713 * for every entry in sg.
    720 num_desc = (u32)sg_nents(sg);
    734 pdcs->src_sg[pdcs->txout] = sg;
    738 while (sg) {
    745 * If sg buffer larger than PDC limit, split across
    748 bufcnt = sg_dma_len(sg);
    873 pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) argument
    1103 pdc_desc_count(struct scatterlist *sg) argument [all...]
/linux-master/arch/s390/kvm/ |
H A D | gaccess.c |
    1357 * @sg: pointer to the shadow guest address space structure
    1365 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, argument
    1378 kvm = sg->private;
    1379 parent = sg->parent;
    1381 asce.val = sg->orig_asce;
    1432 if (sg->edat_level >= 1)
    1436 rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
    1460 if (sg->edat_level >= 1)
    1465 rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
    1487 if (rtte.cr && asce.p && sg
    1562 kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, unsigned long saddr, unsigned long *datptr) argument [all...]
/linux-master/drivers/crypto/stm32/ |
H A D | stm32-hash.c |
    162 struct scatterlist *sg; member in struct:stm32_hash_request_ctx
    338 count = min(rctx->sg->length - rctx->offset, rctx->total);
    342 if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
    343 rctx->sg = sg_next(rctx->sg);
    351 rctx->sg, rctx->offset, count, 0);
    357 if (rctx->offset == rctx->sg->length) {
    358 rctx->sg = sg_next(rctx->sg);
    518 stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, struct scatterlist *sg, int length, int mdma) argument
    655 struct scatterlist sg[1], *tsg; local
    770 struct scatterlist *sg; local [all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
H A D | sdk.h |
    81 /** @sg: Scatter-gather entries pointing to the data in memory */
    82 struct mlx5_fpga_dma_entry sg[2]; member in struct:mlx5_fpga_dma_buf
    112 * The size of the actual packet received is specified in buf.sg[0].size
|