Lines Matching defs:fl

All lines in which the identifier fl is defined or used. fl is the per-client struct fastrpc_user pointer in the Qualcomm FastRPC driver (drivers/misc/fastrpc.c in the Linux kernel); the leading number on each entry is the line's position in that file.

195 struct fastrpc_user *fl;
217 struct fastrpc_user *fl;
246 struct fastrpc_user *fl;
319 int vmid = map->fl->cctx->vmperms[0].vmid;
328 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
339 if (map->fl) {
340 spin_lock(&map->fl->lock);
342 spin_unlock(&map->fl->lock);
343 map->fl = NULL;
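
Lines 319–343 belong to the map teardown path: before a secure mapping is released, ownership of the memory is handed back to HLOS, and the map is then unlinked from its owning client under fl->lock (lines 339–343). A minimal sketch of the reclaim step, assuming the qcom_scm usage implied by lines 319 and 328:

    /* Sketch: return a secure mapping to HLOS on free (error path only logs). */
    struct qcom_scm_vmperm perm = {
        .vmid = QCOM_SCM_VMID_HLOS,
        .perm = QCOM_SCM_PERM_RWX,
    };
    u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(map->fl->cctx->vmperms[0].vmid);
    int err = qcom_scm_assign_mem(map->phys, map->size, &src_perms, &perm, 1);
    if (err)
        dev_err(map->fl->sctx->dev,
                "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                map->phys, map->size, err);
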
364 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
367 struct fastrpc_session_ctx *sess = fl->sctx;
371 spin_lock(&fl->lock);
372 list_for_each_entry(map, &fl->maps, node) {
389 spin_unlock(&fl->lock);
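
Lines 364–389 are the map lookup: a spinlock-protected walk of the per-client fl->maps list. A sketch of the shape these lines imply (the fd key and the simplified signature are assumptions; the real function also takes a take_ref flag, visible at the call sites on lines 762 and 1097):

    static int map_lookup_sketch(struct fastrpc_user *fl, int fd,
                                 struct fastrpc_map **ppmap)
    {
        struct fastrpc_map *map;
        int ret = -ENOENT;

        spin_lock(&fl->lock);
        list_for_each_entry(map, &fl->maps, node) {
            if (map->fd == fd) {    /* assumed key: the dma-buf fd */
                *ppmap = map;
                ret = 0;
                break;
            }
        }
        spin_unlock(&fl->lock);
        return ret;
    }
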
401 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
414 buf->fl = fl;
434 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
440 ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
446 if (fl->sctx && fl->sctx->sid)
447 buf->phys += ((u64)fl->sctx->sid << 32);
452 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
455 struct device *rdev = &fl->cctx->rpdev->dev;
457 return __fastrpc_buf_alloc(fl, rdev, size, obuf);
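
Lines 446–447 fold the session id (SID) into the upper 32 bits of the buffer's device address, so a single u64 value carries both the SMMU context bank and the 32-bit address the DSP dereferences:

    /* sid occupies bits 63:32; the low 32 bits are the IOVA proper */
    buf->phys += ((u64)fl->sctx->sid << 32);

Line 455 shows the remote-heap variant allocating against the channel's rpmsg device (fl->cctx->rpdev->dev) rather than a session device, since that heap belongs to the channel, not to one client session.
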
587 ctx->fl = user;
754 static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
757 struct fastrpc_session_ctx *sess = fl->sctx;
762 if (!fastrpc_map_lookup(fl, fd, ppmap, true))
772 map->fl = fl;
798 map->phys += ((u64)fl->sctx->sid << 32);
814 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
824 spin_lock(&fl->lock);
825 list_add_tail(&map->node, &fl->maps);
826 spin_unlock(&fl->lock);
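
fastrpc_map_create (lines 754–826) first retries the lookup above (line 762) and otherwise imports the dma-buf under the session device, tags the mapping with the session id (line 798), optionally reassigns ownership for secure buffers (line 814), and finally links the map into fl->maps under fl->lock. A sketch of the import core, assuming conventional dma-buf API use with error handling omitted:

    /* Sketch: import an fd as a device-visible mapping. */
    map->buf = dma_buf_get(fd);
    map->attach = dma_buf_attach(map->buf, sess->dev);
    map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
    map->phys = sg_dma_address(map->table->sgl);
    map->phys += ((u64)fl->sctx->sid << 32);    /* line 798 */
    map->fl = fl;                               /* line 772 */
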
904 struct device *dev = ctx->fl->sctx->dev;
913 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
936 struct device *dev = ctx->fl->sctx->dev;
956 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
1064 struct fastrpc_user *fl = ctx->fl;
1097 if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
1109 struct fastrpc_user *fl = ctx->fl;
1113 cctx = fl->cctx;
1114 msg->pid = fl->tgid;
1120 msg->ctx = ctx->ctxid | fl->pd;
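
Lines 1109–1120 assemble the rpmsg message for an invocation: the caller's tgid identifies the client process to the DSP, and the protection-domain selector is OR-ed into the low bits of the context cookie. A sketch of that packing, hedged because only the pid and ctx assignments appear above (the tid and handle fields are assumptions):

    msg->pid = fl->tgid;            /* client process identity */
    msg->tid = current->pid;        /* assumed: issuing thread id */
    msg->ctx = ctx->ctxid | fl->pd; /* cookie = context id | PD selector */
    msg->handle = handle;           /* assumed: remote method handle */
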
1136 static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
1145 if (!fl->sctx)
1148 if (!fl->cctx->rpdev)
1152 dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
1156 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
1167 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
1196 spin_lock(&fl->lock);
1198 spin_unlock(&fl->lock);
1203 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1205 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
1210 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
1215 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
1218 if (!fl->is_secure_dev && fl->cctx->secure) {
1224 if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
1225 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
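
Lines 1215–1225 are the channel's admission policy: a client that opened the non-secure device node may use a secure channel only when the channel supports unsigned PDs and the client actually requested one. Reconstructed from the lines above as a self-contained sketch:

    static bool is_session_rejected(struct fastrpc_user *fl,
                                    bool unsigned_pd_request)
    {
        if (!fl->is_secure_dev && fl->cctx->secure) {
            /* Untrusted client on a secure channel: only an unsigned
             * PD is acceptable, and only if the channel supports it. */
            if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
                dev_err(&fl->cctx->rpdev->dev,
                        "Error: Untrusted application trying to offload to signed PD");
                return true;
            }
        }
        return false;
    }
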
1233 static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
1273 if (!fl->cctx->remote_heap) {
1274 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
1275 &fl->cctx->remote_heap);
1280 if (fl->cctx->vmcount) {
1283 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1284 (u64)fl->cctx->remote_heap->size,
1286 fl->cctx->vmperms, fl->cctx->vmcount);
1288 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
1289 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1295 inbuf.pgid = fl->tgid;
1298 fl->pd = USER_PD;
1308 pages[0].addr = fl->cctx->remote_heap->phys;
1309 pages[0].size = fl->cctx->remote_heap->size;
1317 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1326 if (fl->cctx->vmcount) {
1331 for (i = 0; i < fl->cctx->vmcount; i++)
1332 src_perms |= BIT(fl->cctx->vmperms[i].vmid);
1336 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
1337 (u64)fl->cctx->remote_heap->size,
1340 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
1341 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
1344 fastrpc_buf_free(fl->cctx->remote_heap);
1353 static int fastrpc_init_create_process(struct fastrpc_user *fl,
1386 if (is_session_rejected(fl, unsigned_module)) {
1396 inbuf.pgid = fl->tgid;
1402 fl->pd = USER_PD;
1405 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
1412 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
1417 fl->init_mem = imem;
1449 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1459 fl->init_mem = NULL;
1499 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
1505 tgid = fl->tgid;
1511 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1517 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
1518 struct fastrpc_channel_ctx *cctx = fl->cctx;
1524 fastrpc_release_current_dsp_process(fl);
1527 list_del(&fl->user);
1530 if (fl->init_mem)
1531 fastrpc_buf_free(fl->init_mem);
1533 list_for_each_entry_safe(ctx, n, &fl->pending, node) {
1538 list_for_each_entry_safe(map, m, &fl->maps, node)
1541 list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
1546 fastrpc_session_free(cctx, fl->sctx);
1549 mutex_destroy(&fl->mutex);
1550 kfree(fl);
1560 struct fastrpc_user *fl = NULL;
1566 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1567 if (!fl)
1573 filp->private_data = fl;
1574 spin_lock_init(&fl->lock);
1575 mutex_init(&fl->mutex);
1576 INIT_LIST_HEAD(&fl->pending);
1577 INIT_LIST_HEAD(&fl->maps);
1578 INIT_LIST_HEAD(&fl->mmaps);
1579 INIT_LIST_HEAD(&fl->user);
1580 fl->tgid = current->tgid;
1581 fl->cctx = cctx;
1582 fl->is_secure_dev = fdevice->secure;
1584 fl->sctx = fastrpc_session_alloc(cctx);
1585 if (!fl->sctx) {
1587 mutex_destroy(&fl->mutex);
1588 kfree(fl);
1594 list_add_tail(&fl->user, &cctx->users);
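
Lines 1560–1594 are the open() path: allocate the per-client fastrpc_user, initialize its lock, mutex and lists, record the caller's tgid, bind the channel context and a session, and register the client on the channel's user list (line 1594). From user space this corresponds to opening one of the fastrpc device nodes; the node name below is an assumption based on the driver's per-domain naming:

    /* Hypothetical user-space counterpart of lines 1560–1594. */
    int fd = open("/dev/fastrpc-adsp", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        perror("open fastrpc node");
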
1600 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1610 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
1645 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
1648 int tgid = fl->tgid;
1655 fl->pd = pd;
1657 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
1661 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
1685 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
1691 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
1705 fl->pd = USER_PD;
1707 return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
1712 struct fastrpc_user *fl)
1714 struct fastrpc_channel_ctx *cctx = fl->cctx;
1733 err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
1755 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
1765 dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
1772 dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
1777 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
1782 err = fastrpc_get_info_from_kernel(&cap, fl);
1792 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
1796 struct device *dev = fl->sctx->dev;
1800 req_msg.pgid = fl->tgid;
1808 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1812 spin_lock(&fl->lock);
1814 spin_unlock(&fl->lock);
1823 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
1827 struct device *dev = fl->sctx->dev;
1832 spin_lock(&fl->lock);
1833 list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
1839 spin_unlock(&fl->lock);
1847 return fastrpc_req_munmap_impl(fl, buf);
1850 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
1858 struct device *dev = fl->sctx->dev;
1877 err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
1879 err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
1886 req_msg.pgid = fl->tgid;
1904 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1918 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
1922 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
1924 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
1930 spin_lock(&fl->lock);
1931 list_add_tail(&buf->node, &fl->mmaps);
1932 spin_unlock(&fl->lock);
1945 fastrpc_req_munmap_impl(fl, buf);
1952 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
1959 struct device *dev = fl->sctx->dev;
1961 spin_lock(&fl->lock);
1962 list_for_each_entry_safe(iter, m, &fl->maps, node) {
1969 spin_unlock(&fl->lock);
1976 req_msg.pgid = fl->tgid;
1985 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
1996 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
2003 return fastrpc_req_mem_unmap_impl(fl, &req);
2006 static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
2014 struct device *dev = fl->sctx->dev;
2023 err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
2029 req_msg.pgid = fl->tgid;
2054 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
2071 fastrpc_req_mem_unmap_impl(fl, &req_unmap);
2086 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
2092 err = fastrpc_invoke(fl, argp);
2095 err = fastrpc_init_attach(fl, ROOT_PD);
2098 err = fastrpc_init_attach(fl, SENSORS_PD);
2101 err = fastrpc_init_create_static_process(fl, argp);
2104 err = fastrpc_init_create_process(fl, argp);
2107 err = fastrpc_dmabuf_alloc(fl, argp);
2110 err = fastrpc_req_mmap(fl, argp);
2113 err = fastrpc_req_munmap(fl, argp);
2116 err = fastrpc_req_mem_map(fl, argp);
2119 err = fastrpc_req_mem_unmap(fl, argp);
2122 err = fastrpc_get_dsp_info(fl, argp);
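
The dispatch at lines 2086–2122 is the driver's entire user-space surface. A minimal hypothetical client that attaches the calling process to the root protection domain (ioctl name from the uapi header; the header install path and the device node are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <misc/fastrpc.h>   /* uapi header; install path may vary */

    int main(void)
    {
        int fd = open("/dev/fastrpc-adsp", O_RDWR | O_CLOEXEC);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* FASTRPC_IOCTL_INIT_ATTACH routes to fastrpc_init_attach(fl, ROOT_PD)
         * at line 2095 and takes no argument. */
        if (ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH)) {
            perror("FASTRPC_IOCTL_INIT_ATTACH");
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
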