Lines Matching refs:fl

196 struct fastrpc_user *fl; member
218 struct fastrpc_user *fl; member
247 struct fastrpc_user *fl; member
320 int vmid = map->fl->cctx->vmperms[0].vmid; in fastrpc_free_map()
329 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_free_map()
340 if (map->fl) { in fastrpc_free_map()
341 spin_lock(&map->fl->lock); in fastrpc_free_map()
343 spin_unlock(&map->fl->lock); in fastrpc_free_map()
344 map->fl = NULL; in fastrpc_free_map()
365 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, in fastrpc_map_lookup() argument
368 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_lookup()
372 spin_lock(&fl->lock); in fastrpc_map_lookup()
373 list_for_each_entry(map, &fl->maps, node) { in fastrpc_map_lookup()
390 spin_unlock(&fl->lock); in fastrpc_map_lookup()
402 static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, in __fastrpc_buf_alloc() argument
415 buf->fl = fl; in __fastrpc_buf_alloc()
435 static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, in fastrpc_buf_alloc() argument
441 ret = __fastrpc_buf_alloc(fl, dev, size, obuf); in fastrpc_buf_alloc()
447 if (fl->sctx && fl->sctx->sid) in fastrpc_buf_alloc()
448 buf->phys += ((u64)fl->sctx->sid << 32); in fastrpc_buf_alloc()
453 static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev, in fastrpc_remote_heap_alloc() argument
456 struct device *rdev = &fl->cctx->rpdev->dev; in fastrpc_remote_heap_alloc()
458 return __fastrpc_buf_alloc(fl, rdev, size, obuf); in fastrpc_remote_heap_alloc()
588 ctx->fl = user; in fastrpc_context_alloc()
755 static int fastrpc_map_create(struct fastrpc_user *fl, int fd, in fastrpc_map_create() argument
758 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_create()
763 if (!fastrpc_map_lookup(fl, fd, ppmap, true)) in fastrpc_map_create()
773 map->fl = fl; in fastrpc_map_create()
799 map->phys += ((u64)fl->sctx->sid << 32); in fastrpc_map_create()
815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; in fastrpc_map_create()
825 spin_lock(&fl->lock); in fastrpc_map_create()
826 list_add_tail(&map->node, &fl->maps); in fastrpc_map_create()
827 spin_unlock(&fl->lock); in fastrpc_map_create()
905 struct device *dev = ctx->fl->sctx->dev; in fastrpc_create_maps()
914 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, in fastrpc_create_maps()
937 struct device *dev = ctx->fl->sctx->dev; in fastrpc_get_args()
957 if (ctx->fl->sctx->sid) in fastrpc_get_args()
958 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
960 err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
1068 struct fastrpc_user *fl = ctx->fl; in fastrpc_put_args() local
1101 if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) in fastrpc_put_args()
1113 struct fastrpc_user *fl = ctx->fl; in fastrpc_invoke_send() local
1117 cctx = fl->cctx; in fastrpc_invoke_send()
1118 msg->client_id = fl->client_id; in fastrpc_invoke_send()
1124 msg->ctx = ctx->ctxid | fl->pd; in fastrpc_invoke_send()
1140 static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, in fastrpc_internal_invoke() argument
1149 if (!fl->sctx) in fastrpc_internal_invoke()
1152 if (!fl->cctx->rpdev) in fastrpc_internal_invoke()
1156 …dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle… in fastrpc_internal_invoke()
1160 ctx = fastrpc_context_alloc(fl, kernel, sc, args); in fastrpc_internal_invoke()
1171 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); in fastrpc_internal_invoke()
1200 spin_lock(&fl->lock); in fastrpc_internal_invoke()
1202 spin_unlock(&fl->lock); in fastrpc_internal_invoke()
1207 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_internal_invoke()
1209 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps); in fastrpc_internal_invoke()
1214 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); in fastrpc_internal_invoke()
1219 static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request) in is_session_rejected() argument
1222 if (!fl->is_secure_dev && fl->cctx->secure) { in is_session_rejected()
1228 if (!fl->cctx->unsigned_support || !unsigned_pd_request) { in is_session_rejected()
1229 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n"); in is_session_rejected()
1237 static int fastrpc_init_create_static_process(struct fastrpc_user *fl, in fastrpc_init_create_static_process() argument
1273 if (!fl->cctx->remote_heap) { in fastrpc_init_create_static_process()
1274 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen, in fastrpc_init_create_static_process()
1275 &fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1280 if (fl->cctx->vmcount) { in fastrpc_init_create_static_process()
1283 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1284 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1286 fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_init_create_static_process()
1288 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1289 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1296 inbuf.client_id = fl->client_id; in fastrpc_init_create_static_process()
1299 fl->pd = USER_PD; in fastrpc_init_create_static_process()
1309 pages[0].addr = fl->cctx->remote_heap->phys; in fastrpc_init_create_static_process()
1310 pages[0].size = fl->cctx->remote_heap->size; in fastrpc_init_create_static_process()
1318 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_create_static_process()
1328 if (fl->cctx->vmcount && scm_done) { in fastrpc_init_create_static_process()
1333 for (i = 0; i < fl->cctx->vmcount; i++) in fastrpc_init_create_static_process()
1334 src_perms |= BIT(fl->cctx->vmperms[i].vmid); in fastrpc_init_create_static_process()
1338 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1339 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1342 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1343 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1346 fastrpc_buf_free(fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1355 static int fastrpc_init_create_process(struct fastrpc_user *fl, in fastrpc_init_create_process() argument
1388 if (is_session_rejected(fl, unsigned_module)) { in fastrpc_init_create_process()
1398 inbuf.client_id = fl->client_id; in fastrpc_init_create_process()
1404 fl->pd = USER_PD; in fastrpc_init_create_process()
1407 err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map); in fastrpc_init_create_process()
1414 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, in fastrpc_init_create_process()
1419 fl->init_mem = imem; in fastrpc_init_create_process()
1451 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_create_process()
1461 fl->init_mem = NULL; in fastrpc_init_create_process()
1472 struct fastrpc_user *fl) in fastrpc_session_alloc() argument
1474 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_session_alloc()
1485 fl->client_id = i + 1; in fastrpc_session_alloc()
1504 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) in fastrpc_release_current_dsp_process() argument
1510 client_id = fl->client_id; in fastrpc_release_current_dsp_process()
1516 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_release_current_dsp_process()
1522 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_release() local
1523 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_device_release()
1529 fastrpc_release_current_dsp_process(fl); in fastrpc_device_release()
1532 list_del(&fl->user); in fastrpc_device_release()
1535 if (fl->init_mem) in fastrpc_device_release()
1536 fastrpc_buf_free(fl->init_mem); in fastrpc_device_release()
1538 list_for_each_entry_safe(ctx, n, &fl->pending, node) { in fastrpc_device_release()
1543 list_for_each_entry_safe(map, m, &fl->maps, node) in fastrpc_device_release()
1546 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_device_release()
1551 fastrpc_session_free(cctx, fl->sctx); in fastrpc_device_release()
1554 mutex_destroy(&fl->mutex); in fastrpc_device_release()
1555 kfree(fl); in fastrpc_device_release()
1565 struct fastrpc_user *fl = NULL; in fastrpc_device_open() local
1571 fl = kzalloc(sizeof(*fl), GFP_KERNEL); in fastrpc_device_open()
1572 if (!fl) in fastrpc_device_open()
1578 filp->private_data = fl; in fastrpc_device_open()
1579 spin_lock_init(&fl->lock); in fastrpc_device_open()
1580 mutex_init(&fl->mutex); in fastrpc_device_open()
1581 INIT_LIST_HEAD(&fl->pending); in fastrpc_device_open()
1582 INIT_LIST_HEAD(&fl->maps); in fastrpc_device_open()
1583 INIT_LIST_HEAD(&fl->mmaps); in fastrpc_device_open()
1584 INIT_LIST_HEAD(&fl->user); in fastrpc_device_open()
1585 fl->cctx = cctx; in fastrpc_device_open()
1586 fl->is_secure_dev = fdevice->secure; in fastrpc_device_open()
1588 fl->sctx = fastrpc_session_alloc(fl); in fastrpc_device_open()
1589 if (!fl->sctx) { in fastrpc_device_open()
1591 mutex_destroy(&fl->mutex); in fastrpc_device_open()
1592 kfree(fl); in fastrpc_device_open()
1598 list_add_tail(&fl->user, &cctx->users); in fastrpc_device_open()
1604 static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) in fastrpc_dmabuf_alloc() argument
1614 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); in fastrpc_dmabuf_alloc()
1649 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd) in fastrpc_init_attach() argument
1652 int client_id = fl->client_id; in fastrpc_init_attach()
1659 fl->pd = pd; in fastrpc_init_attach()
1661 return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, in fastrpc_init_attach()
1665 static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) in fastrpc_invoke() argument
1689 err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); in fastrpc_invoke()
1695 static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf, in fastrpc_get_info_from_dsp() argument
1715 return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, in fastrpc_get_info_from_dsp()
1720 struct fastrpc_user *fl) in fastrpc_get_info_from_kernel() argument
1722 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_get_info_from_kernel()
1741 err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); in fastrpc_get_info_from_kernel()
1763 static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) in fastrpc_get_dsp_info() argument
1773 dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n", in fastrpc_get_dsp_info()
1780 dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err); in fastrpc_get_dsp_info()
1785 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n", in fastrpc_get_dsp_info()
1790 err = fastrpc_get_info_from_kernel(&cap, fl); in fastrpc_get_dsp_info()
1800 static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf) in fastrpc_req_munmap_impl() argument
1804 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap_impl()
1808 req_msg.client_id = fl->client_id; in fastrpc_req_munmap_impl()
1816 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_munmap_impl()
1820 spin_lock(&fl->lock); in fastrpc_req_munmap_impl()
1822 spin_unlock(&fl->lock); in fastrpc_req_munmap_impl()
1831 static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_munmap() argument
1835 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap()
1840 spin_lock(&fl->lock); in fastrpc_req_munmap()
1841 list_for_each_entry_safe(iter, b, &fl->mmaps, node) { in fastrpc_req_munmap()
1847 spin_unlock(&fl->lock); in fastrpc_req_munmap()
1855 return fastrpc_req_munmap_impl(fl, buf); in fastrpc_req_munmap()
1858 static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mmap() argument
1866 struct device *dev = fl->sctx->dev; in fastrpc_req_mmap()
1885 err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf); in fastrpc_req_mmap()
1887 err = fastrpc_buf_alloc(fl, dev, req.size, &buf); in fastrpc_req_mmap()
1894 req_msg.client_id = fl->client_id; in fastrpc_req_mmap()
1912 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_mmap()
1927 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { in fastrpc_req_mmap()
1931 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_req_mmap()
1933 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", in fastrpc_req_mmap()
1939 spin_lock(&fl->lock); in fastrpc_req_mmap()
1940 list_add_tail(&buf->node, &fl->mmaps); in fastrpc_req_mmap()
1941 spin_unlock(&fl->lock); in fastrpc_req_mmap()
1954 fastrpc_req_munmap_impl(fl, buf); in fastrpc_req_mmap()
1959 static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req) in fastrpc_req_mem_unmap_impl() argument
1966 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_unmap_impl()
1968 spin_lock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1969 list_for_each_entry_safe(iter, m, &fl->maps, node) { in fastrpc_req_mem_unmap_impl()
1976 spin_unlock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1983 req_msg.client_id = fl->client_id; in fastrpc_req_mem_unmap_impl()
1992 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, in fastrpc_req_mem_unmap_impl()
2003 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mem_unmap() argument
2010 return fastrpc_req_mem_unmap_impl(fl, &req); in fastrpc_req_mem_unmap()
2013 static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) in fastrpc_req_mem_map() argument
2021 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_map()
2030 err = fastrpc_map_create(fl, req.fd, req.length, 0, &map); in fastrpc_req_mem_map()
2036 req_msg.client_id = fl->client_id; in fastrpc_req_mem_map()
2061 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); in fastrpc_req_mem_map()
2078 fastrpc_req_mem_unmap_impl(fl, &req_unmap); in fastrpc_req_mem_map()
2093 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_ioctl() local
2099 err = fastrpc_invoke(fl, argp); in fastrpc_device_ioctl()
2102 err = fastrpc_init_attach(fl, ROOT_PD); in fastrpc_device_ioctl()
2105 err = fastrpc_init_attach(fl, SENSORS_PD); in fastrpc_device_ioctl()
2108 err = fastrpc_init_create_static_process(fl, argp); in fastrpc_device_ioctl()
2111 err = fastrpc_init_create_process(fl, argp); in fastrpc_device_ioctl()
2114 err = fastrpc_dmabuf_alloc(fl, argp); in fastrpc_device_ioctl()
2117 err = fastrpc_req_mmap(fl, argp); in fastrpc_device_ioctl()
2120 err = fastrpc_req_munmap(fl, argp); in fastrpc_device_ioctl()
2123 err = fastrpc_req_mem_map(fl, argp); in fastrpc_device_ioctl()
2126 err = fastrpc_req_mem_unmap(fl, argp); in fastrpc_device_ioctl()
2129 err = fastrpc_get_dsp_info(fl, argp); in fastrpc_device_ioctl()