Lines Matching defs:shm

49  * A typical OP-TEE private shm allocation is 224 bytes (argument struct
85 struct tee_shm *shm;
92 shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
93 if (!shm) {
95 p->u.memref.shm = NULL;
99 rc = tee_shm_get_pa(shm, 0, &pa);
104 p->u.memref.shm = shm;
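
The matches at 85–104 appear to come from the OP-TEE driver helper that turns an OPTEE_MSG temporary-memory parameter back into a struct tee_param: the tee_shm pointer is recovered from the shm_ref cookie and the physical buf_ptr is converted back into an offset within that shm. A minimal sketch of that pattern; the function name, the attr arithmetic and the fields not shown in the matches are reconstructions and may differ from the actual driver:

static int from_msg_param_tmp_mem_sketch(struct tee_param *p, u32 attr,
					 const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + attr -
		  OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;

	/* shm_ref round-trips the kernel's tee_shm pointer through secure world */
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	/* tmem.buf_ptr is a physical address; turn it back into an shm offset */
	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	return 0;
}
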
112 struct tee_shm *shm;
117 shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
119 if (shm) {
121 p->u.memref.shm = shm;
124 p->u.memref.shm = NULL;
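
Lines 112–124 are the registered-memory counterpart: rmem.shm_ref already identifies the tee_shm and the offset travels verbatim in the message, so no physical-address lookup is needed. A sketch under the same assumptions as above:

static void from_msg_param_reg_mem_sketch(struct tee_param *p, u32 attr,
					  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + attr -
		  OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		/* Registered memory: the offset is carried directly in rmem */
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}
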
188 mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
191 if (!p->u.memref.shm) {
196 rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
213 mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
251 if (tee_shm_is_dynamic(p->u.memref.shm))
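
Lines 188–251 show the opposite direction, tee_param to OPTEE_MSG: the shm pointer is stashed in shm_ref so the reply can be matched back up, and the tee_shm_is_dynamic() test at 251 is what selects the registered-memory (rmem) encoding over the temporary-memory (tmem) one in the caller. A sketch of the tmem case, with the attr arithmetic reconstructed:

static int to_msg_param_tmp_mem_sketch(struct optee_msg_param *mp,
				       const struct tee_param *p)
{
	phys_addr_t pa;
	int rc;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	/* Keep the kernel pointer so the from_msg_param_*() path can recover it */
	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	return 0;
}
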
321 break; /* All shm's freed */
323 struct tee_shm *shm;
332 shm = reg_pair_to_ptr(res.result.shm_upper32,
334 tee_shm_free(shm);
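
Lines 321–334 belong to the loop that drains secure world's cache of shared-memory objects: each SMC hands back at most one cached object, split across two 32-bit result registers, until a status value signals that everything has been freed. A condensed sketch; the SMC function ID, status constants and result struct are my reading of the OP-TEE SMC ABI and are not shown in the matches:

	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0,
				     0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break;		/* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/* One cached object comes back per call as a register pair */
			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		}
	}
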
452 static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
494 tee_shm_get_page_offset(shm));
501 msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
502 msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
508 (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
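
Lines 452–508 are from the shared-memory registration path: the buffer is described to secure world as non-contiguous temporary memory, where buf_ptr points at a physical list of page addresses and its low bits carry the data offset within the first page, while shm_ref again carries the kernel's tee_shm pointer. A sketch of how the message parameter might be filled in once the page list exists; the attr value, optee_fill_pages_list() and the surrounding msg_arg setup are reconstructed from context:

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * buf_ptr addresses the page list, not the buffer itself; the bits
	 * below OPTEE_MSG_NONCONTIG_PAGE_SIZE carry the offset of the data
	 * within the first page.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
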
520 static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
551 msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
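
Lines 520–551 show the unregistration side, which only needs the cookie: the shm is referenced as a registered-memory parameter and secure world drops its mapping. A sketch of the parameter setup, with the command and attr constants assumed rather than shown in the matches:

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
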
561 static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
573 struct tee_shm *shm)
588 struct tee_shm *shm, size_t size, size_t align)
594 if (shm->flags & TEE_SHM_PRIV)
595 return tee_dyn_shm_alloc_helper(shm, size, align, NULL);
597 return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register);
601 struct tee_shm *shm)
603 if (!(shm->flags & TEE_SHM_PRIV))
604 tee_dyn_shm_free_helper(shm, optee_shm_unregister);
606 tee_dyn_shm_free_helper(shm, NULL);
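
Lines 588–606 are the pool callbacks that tie this together: dynamically allocated shm is registered with secure world on allocation and unregistered on free, while driver-private (TEE_SHM_PRIV) buffers skip registration entirely by passing a NULL callback to the generic helpers. A sketch matching the branches visible in the matches (function names are placeholders):

static int pool_op_alloc_sketch(struct tee_shm_pool *pool,
				struct tee_shm *shm, size_t size, size_t align)
{
	/* Private buffers are never shared with secure world, so skip registration */
	if (shm->flags & TEE_SHM_PRIV)
		return tee_dyn_shm_alloc_helper(shm, size, align, NULL);

	return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register);
}

static void pool_op_free_sketch(struct tee_shm_pool *pool,
				struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		tee_dyn_shm_free_helper(shm, optee_shm_unregister);
	else
		tee_dyn_shm_free_helper(shm, NULL);
}
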
651 struct tee_shm *shm;
661 shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
664 optee_rpc_cmd_free_suppl(ctx, shm);
667 tee_shm_free(shm);
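
Lines 651–667 handle the RPC that frees shared memory on secure world's behalf: value.b carries the tee_shm pointer that was handed out earlier, and value.a says whether it came from the supplicant or from the kernel's private pool. A sketch of that dispatch, with the type constants assumed from the RPC ABI:

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		/* Allocated by tee-supplicant in user space */
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
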
680 struct tee_shm *shm;
704 shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
707 shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
714 if (IS_ERR(shm)) {
724 pages = tee_shm_get_pages(shm, &page_count);
744 (tee_shm_get_page_offset(shm) &
748 tee_shm_get_page_offset(shm));
752 if (tee_shm_get_pa(shm, 0, &pa)) {
760 arg->params[0].u.tmem.size = tee_shm_get_size(shm);
761 arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
766 tee_shm_free(shm);
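
Lines 680–766 are the matching allocation RPC: depending on the requested type the buffer comes either from the supplicant or from the kernel's private pool, and the reply encodes it either as a non-contiguous page list (dynamic shm) or as a plain physical address (reserved shm), with the size and the shm_ref cookie filled in either way. A condensed sketch; error handling and the page-list bookkeeping are abbreviated, and the constants are assumed from the RPC/MSG ABI:

	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	pages = tee_shm_get_pages(shm, &page_count);
	if (pages) {
		/* Dynamic shm: describe it as a non-contiguous page list
		 * (pages_list allocation not shown here) */
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		optee_fill_pages_list(pages_list, pages, page_count,
				      tee_shm_get_page_offset(shm));
	} else {
		/* Reserved shm: a contiguous physical address is enough */
		if (tee_shm_get_pa(shm, 0, &pa)) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			tee_shm_free(shm);
			return;
		}
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
	}
	arg->params[0].u.tmem.size = tee_shm_get_size(shm);
	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	arg->ret = TEEC_SUCCESS;
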
819 struct tee_shm *shm;
824 shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
825 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
828 (unsigned long)shm);
835 kmemleak_not_leak(shm);
838 shm = reg_pair_to_ptr(param->a1, param->a2);
839 tee_shm_free(shm);
853 shm = reg_pair_to_ptr(param->a1, param->a2);
854 arg = tee_shm_get_va(shm, 0);
857 __func__, shm);
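
Lines 819–857 are from the register-level RPC dispatcher: for an allocation request a private buffer is allocated and both its physical address and its tee_shm pointer are returned as register pairs (with kmemleak told the pointer is intentionally held only by secure world), for a free request the pointer is rebuilt from the registers and freed, and for a command request the argument struct is fetched from the shm with tee_shm_get_va(). A sketch of the alloc/free cases, with the function IDs assumed from the SMC ABI:

	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			/* Hand the tee_shm pointer to secure world as a cookie */
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		/* Secure world holds the only reference until it asks to free it */
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
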
876 * @shm: shared memory holding the message to pass to secure world
877 * @offs: offset of the message in @shm
886 struct tee_shm *shm, u_int offs,
900 arg = tee_shm_get_va(shm, offs);
905 rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
910 if (rpc_arg && tee_shm_is_dynamic(shm)) {
912 reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
917 rc = tee_shm_get_pa(shm, offs, &parg);
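
Lines 876–917 are from the call path into secure world: the argument struct (and, when supported, the trailing RPC argument struct) lives in @shm at @offs, and how it is passed depends on whether the shm is dynamic. A sketch of the argument encoding; the rpc_param_count capability test and the SMC function IDs are assumptions based on the SMC ABI:

	struct optee_msg_arg *arg, *rpc_arg = NULL;
	struct optee_rpc_param param = { };
	unsigned int rpc_arg_offs;
	phys_addr_t parg;
	int rc;

	arg = tee_shm_get_va(shm, offs);
	if (IS_ERR(arg))
		return PTR_ERR(arg);

	if (optee->rpc_param_count) {
		/* The RPC arg struct sits right after the normal arg struct */
		rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
		rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
		if (IS_ERR(rpc_arg))
			return PTR_ERR(rpc_arg);
	}

	if (rpc_arg && tee_shm_is_dynamic(shm)) {
		/* Registered shm: pass the tee_shm cookie plus the offset */
		param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
		reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
		param.a3 = offs;
	} else {
		/* Static shm: pass the physical address of the argument struct */
		rc = tee_shm_get_pa(shm, offs, &parg);
		if (rc)
			return rc;

		param.a0 = rpc_arg ? OPTEE_SMC_CALL_WITH_RPC_ARG :
				     OPTEE_SMC_CALL_WITH_ARG;
		reg_pair_from_64(&param.a1, &param.a2, parg);
	}
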
1359 pr_err("static shm service not available\n");
1747 * Ensure that there are no pre-existing shm objects before enabling
1748 * the shm cache so that there's no chance of receiving an invalid
1751 * shm cache.
1756 * Only enable the shm cache in case we're not able to pass the RPC
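
The comments at 1747–1756 come from driver probe: any objects secure world may still be caching are flushed before the cache is (re)enabled, and the cache is only turned on when the RPC argument struct cannot be passed right after the normal argument struct. A sketch of how I read that sequence; the helper names and the rpc_param_count test are assumptions, not shown in the matches:

	/* Flush stale cached objects left over from a previous kernel (kexec) */
	optee_disable_unmapped_shm_cache(optee);

	/* Only cache shm for the calling convention without inline RPC args */
	if (!optee->rpc_param_count)
		optee_enable_shm_cache(optee);
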