Lines Matching refs:fl
196 struct fastrpc_user *fl; member
218 struct fastrpc_user *fl; member
247 struct fastrpc_user *fl; member
320 int vmid = map->fl->cctx->vmperms[0].vmid; in fastrpc_free_map()
329 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_free_map()
340 if (map->fl) { in fastrpc_free_map()
341 spin_lock(&map->fl->lock); in fastrpc_free_map()
343 spin_unlock(&map->fl->lock); in fastrpc_free_map()
344 map->fl = NULL; in fastrpc_free_map()
365 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, in fastrpc_map_lookup() argument
368 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_lookup()
372 spin_lock(&fl->lock); in fastrpc_map_lookup()
373 list_for_each_entry(map, &fl->maps, node) { in fastrpc_map_lookup()
390 spin_unlock(&fl->lock); in fastrpc_map_lookup()
402 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, in __fastrpc_buf_alloc() argument
415 buf->fl = fl; in __fastrpc_buf_alloc()
435 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, in fastrpc_buf_alloc() argument
441 ret = __fastrpc_buf_alloc(fl, dev, size, obuf); in fastrpc_buf_alloc()
447 if (fl->sctx && fl->sctx->sid) in fastrpc_buf_alloc()
448 buf->phys += ((u64)fl->sctx->sid << 32); in fastrpc_buf_alloc()
453 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev, in fastrpc_remote_heap_alloc() argument
456 struct device *rdev = &fl->cctx->rpdev->dev; in fastrpc_remote_heap_alloc()
458 return __fastrpc_buf_alloc(fl, rdev, size, obuf); in fastrpc_remote_heap_alloc()
588 ctx->fl = user; in fastrpc_context_alloc()
755 static int fastrpc_map_create(struct fastrpc_user *fl, int fd, in fastrpc_map_create() argument
758 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_create()
763 if (!fastrpc_map_lookup(fl, fd, ppmap, true)) in fastrpc_map_create()
773 map->fl = fl; in fastrpc_map_create()
799 map->phys += ((u64)fl->sctx->sid << 32); in fastrpc_map_create()
815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; in fastrpc_map_create()
825 spin_lock(&fl->lock); in fastrpc_map_create()
826 list_add_tail(&map->node, &fl->maps); in fastrpc_map_create()
827 spin_unlock(&fl->lock); in fastrpc_map_create()
905 struct device *dev = ctx->fl->sctx->dev; in fastrpc_create_maps()
914 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, in fastrpc_create_maps()
937 struct device *dev = ctx->fl->sctx->dev; in fastrpc_get_args()
957 if (ctx->fl->sctx->sid) in fastrpc_get_args()
958 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
960 err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
1068 struct fastrpc_user *fl = ctx->fl; in fastrpc_put_args() local
1101 if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) in fastrpc_put_args()
1113 struct fastrpc_user *fl = ctx->fl; in fastrpc_invoke_send() local
1117 cctx = fl->cctx; in fastrpc_invoke_send()
1118 msg->pid = fl->tgid; in fastrpc_invoke_send()
1124 msg->ctx = ctx->ctxid | fl->pd; in fastrpc_invoke_send()
1140 static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, in fastrpc_internal_invoke() argument
1149 if (!fl->sctx) in fastrpc_internal_invoke()
1152 if (!fl->cctx->rpdev) in fastrpc_internal_invoke()
1156 dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); in fastrpc_internal_invoke()
1160 ctx = fastrpc_context_alloc(fl, kernel, sc, args); in fastrpc_internal_invoke()
1171 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); in fastrpc_internal_invoke()
1200 spin_lock(&fl->lock); in fastrpc_internal_invoke()
1202 spin_unlock(&fl->lock); in fastrpc_internal_invoke()
1207 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_internal_invoke()
1209 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps); in fastrpc_internal_invoke()
1214 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); in fastrpc_internal_invoke()
1219 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request) in is_session_rejected() argument
1222 if (!fl->is_secure_dev && fl->cctx->secure) { in is_session_rejected()
1228 if (!fl->cctx->unsigned_support || !unsigned_pd_request) { in is_session_rejected()
1229 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n"); in is_session_rejected()
1237 static int fastrpc_init_create_static_process(struct fastrpc_user *fl, in fastrpc_init_create_static_process() argument
1273 if (!fl->cctx->remote_heap) { in fastrpc_init_create_static_process()
1274 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen, in fastrpc_init_create_static_process()
1275 &fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1280 if (fl->cctx->vmcount) { in fastrpc_init_create_static_process()
1283 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1284 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1286 fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_init_create_static_process()
1288 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1289 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1296 inbuf.pgid = fl->tgid; in fastrpc_init_create_static_process()
1299 fl->pd = USER_PD; in fastrpc_init_create_static_process()
1309 pages[0].addr = fl->cctx->remote_heap->phys; in fastrpc_init_create_static_process()
1310 pages[0].size = fl->cctx->remote_heap->size; in fastrpc_init_create_static_process()
1318 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_create_static_process()
1328 if (fl->cctx->vmcount && scm_done) { in fastrpc_init_create_static_process()
1333 for (i = 0; i < fl->cctx->vmcount; i++) in fastrpc_init_create_static_process()
1334 src_perms |= BIT(fl->cctx->vmperms[i].vmid); in fastrpc_init_create_static_process()
1338 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1339 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1342 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1343 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1346 fastrpc_buf_free(fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1355 static int fastrpc_init_create_process(struct fastrpc_user *fl, in fastrpc_init_create_process() argument
1388 if (is_session_rejected(fl, unsigned_module)) { in fastrpc_init_create_process()
1398 inbuf.pgid = fl->tgid; in fastrpc_init_create_process()
1404 fl->pd = USER_PD; in fastrpc_init_create_process()
1407 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map); in fastrpc_init_create_process()
1414 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, in fastrpc_init_create_process()
1419 fl->init_mem = imem; in fastrpc_init_create_process()
1451 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_create_process()
1461 fl->init_mem = NULL; in fastrpc_init_create_process()
1501 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) in fastrpc_release_current_dsp_process() argument
1507 tgid = fl->tgid; in fastrpc_release_current_dsp_process()
1513 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_release_current_dsp_process()
1519 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_release() local
1520 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_device_release()
1526 fastrpc_release_current_dsp_process(fl); in fastrpc_device_release()
1529 list_del(&fl->user); in fastrpc_device_release()
1532 if (fl->init_mem) in fastrpc_device_release()
1533 fastrpc_buf_free(fl->init_mem); in fastrpc_device_release()
1535 list_for_each_entry_safe(ctx, n, &fl->pending, node) { in fastrpc_device_release()
1540 list_for_each_entry_safe(map, m, &fl->maps, node) in fastrpc_device_release()
1543 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_device_release()
1548 fastrpc_session_free(cctx, fl->sctx); in fastrpc_device_release()
1551 mutex_destroy(&fl->mutex); in fastrpc_device_release()
1552 kfree(fl); in fastrpc_device_release()
1562 struct fastrpc_user *fl = NULL; in fastrpc_device_open() local
1568 fl = kzalloc(sizeof(*fl), GFP_KERNEL); in fastrpc_device_open()
1569 if (!fl) in fastrpc_device_open()
1575 filp->private_data = fl; in fastrpc_device_open()
1576 spin_lock_init(&fl->lock); in fastrpc_device_open()
1577 mutex_init(&fl->mutex); in fastrpc_device_open()
1578 INIT_LIST_HEAD(&fl->pending); in fastrpc_device_open()
1579 INIT_LIST_HEAD(&fl->maps); in fastrpc_device_open()
1580 INIT_LIST_HEAD(&fl->mmaps); in fastrpc_device_open()
1581 INIT_LIST_HEAD(&fl->user); in fastrpc_device_open()
1582 fl->tgid = current->tgid; in fastrpc_device_open()
1583 fl->cctx = cctx; in fastrpc_device_open()
1584 fl->is_secure_dev = fdevice->secure; in fastrpc_device_open()
1586 fl->sctx = fastrpc_session_alloc(cctx); in fastrpc_device_open()
1587 if (!fl->sctx) { in fastrpc_device_open()
1589 mutex_destroy(&fl->mutex); in fastrpc_device_open()
1590 kfree(fl); in fastrpc_device_open()
1596 list_add_tail(&fl->user, &cctx->users); in fastrpc_device_open()
1602 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) in fastrpc_dmabuf_alloc() argument
1612 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); in fastrpc_dmabuf_alloc()
1647 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd) in fastrpc_init_attach() argument
1650 int tgid = fl->tgid; in fastrpc_init_attach()
1657 fl->pd = pd; in fastrpc_init_attach()
1659 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_attach()
1663 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) in fastrpc_invoke() argument
1687 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); in fastrpc_invoke()
1693 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf, in fastrpc_get_info_from_dsp() argument
1713 return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, in fastrpc_get_info_from_dsp()
1718 struct fastrpc_user *fl) in fastrpc_get_info_from_kernel() argument
1720 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_get_info_from_kernel()
1739 err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); in fastrpc_get_info_from_kernel()
1761 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) in fastrpc_get_dsp_info() argument
1771 dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n", in fastrpc_get_dsp_info()
1778 dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err); in fastrpc_get_dsp_info()
1783 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n", in fastrpc_get_dsp_info()
1788 err = fastrpc_get_info_from_kernel(&cap, fl); in fastrpc_get_dsp_info()
1798 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf) in fastrpc_req_munmap_impl() argument
1802 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap_impl()
1806 req_msg.pgid = fl->tgid; in fastrpc_req_munmap_impl()
1814 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_munmap_impl()
1818 spin_lock(&fl->lock); in fastrpc_req_munmap_impl()
1820 spin_unlock(&fl->lock); in fastrpc_req_munmap_impl()
1829 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_munmap() argument
1833 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap()
1838 spin_lock(&fl->lock); in fastrpc_req_munmap()
1839 list_for_each_entry_safe(iter, b, &fl->mmaps, node) { in fastrpc_req_munmap()
1845 spin_unlock(&fl->lock); in fastrpc_req_munmap()
1853 return fastrpc_req_munmap_impl(fl, buf); in fastrpc_req_munmap()
1856 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mmap() argument
1864 struct device *dev = fl->sctx->dev; in fastrpc_req_mmap()
1883 err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf); in fastrpc_req_mmap()
1885 err = fastrpc_buf_alloc(fl, dev, req.size, &buf); in fastrpc_req_mmap()
1892 req_msg.pgid = fl->tgid; in fastrpc_req_mmap()
1910 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_mmap()
1925 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { in fastrpc_req_mmap()
1929 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_req_mmap()
1931 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", in fastrpc_req_mmap()
1937 spin_lock(&fl->lock); in fastrpc_req_mmap()
1938 list_add_tail(&buf->node, &fl->mmaps); in fastrpc_req_mmap()
1939 spin_unlock(&fl->lock); in fastrpc_req_mmap()
1952 fastrpc_req_munmap_impl(fl, buf); in fastrpc_req_mmap()
1957 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req) in fastrpc_req_mem_unmap_impl() argument
1964 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_unmap_impl()
1966 spin_lock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1967 list_for_each_entry_safe(iter, m, &fl->maps, node) { in fastrpc_req_mem_unmap_impl()
1974 spin_unlock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1981 req_msg.pgid = fl->tgid; in fastrpc_req_mem_unmap_impl()
1990 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_mem_unmap_impl()
2001 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mem_unmap() argument
2008 return fastrpc_req_mem_unmap_impl(fl, &req); in fastrpc_req_mem_unmap()
2011 static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mem_map() argument
2019 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_map()
2028 err = fastrpc_map_create(fl, req.fd, req.length, 0, &map); in fastrpc_req_mem_map()
2034 req_msg.pgid = fl->tgid; in fastrpc_req_mem_map()
2059 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); in fastrpc_req_mem_map()
2076 fastrpc_req_mem_unmap_impl(fl, &req_unmap); in fastrpc_req_mem_map()
2091 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_ioctl() local
2097 err = fastrpc_invoke(fl, argp); in fastrpc_device_ioctl()
2100 err = fastrpc_init_attach(fl, ROOT_PD); in fastrpc_device_ioctl()
2103 err = fastrpc_init_attach(fl, SENSORS_PD); in fastrpc_device_ioctl()
2106 err = fastrpc_init_create_static_process(fl, argp); in fastrpc_device_ioctl()
2109 err = fastrpc_init_create_process(fl, argp); in fastrpc_device_ioctl()
2112 err = fastrpc_dmabuf_alloc(fl, argp); in fastrpc_device_ioctl()
2115 err = fastrpc_req_mmap(fl, argp); in fastrpc_device_ioctl()
2118 err = fastrpc_req_munmap(fl, argp); in fastrpc_device_ioctl()
2121 err = fastrpc_req_mem_map(fl, argp); in fastrpc_device_ioctl()
2124 err = fastrpc_req_mem_unmap(fl, argp); in fastrpc_device_ioctl()
2127 err = fastrpc_get_dsp_info(fl, argp); in fastrpc_device_ioctl()