Lines matching refs: encl (identifier cross-reference for the Linux SGX enclave driver, arch/x86/kernel/cpu/sgx/encl.c). The leading number on each hit is the source line; the trailing annotation names the enclosing function.

15 static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
58 static int reclaimer_writing_to_pcmd(struct sgx_encl *encl, in reclaimer_writing_to_pcmd() argument
83 if (addr == encl->base + encl->size) in reclaimer_writing_to_pcmd()
86 entry = xa_load(&encl->page_array, PFN_DOWN(addr)); in reclaimer_writing_to_pcmd()
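
The hits at 58-86 come from reclaimer_writing_to_pcmd(), which decides whether a PCMD page can be truncated or whether the reclaimer may still need to write to it. A simplified sketch of that walk, reconstructed from the hits above and recent mainline (constant and flag names may differ between kernel versions):

static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
				     unsigned long start_addr)
{
	int reclaimed = 0;
	int i;

	/* Walk all enclave pages whose PCMD entries share one PCMD page. */
	for (i = 0; i < PCMDS_PER_PAGE; i++) {
		unsigned long addr = start_addr + i * PAGE_SIZE;
		struct sgx_encl_page *entry;

		/* Stop at the SECS slot, one page past the enclave range. */
		if (addr == encl->base + encl->size)
			break;

		entry = xa_load(&encl->page_array, PFN_DOWN(addr));
		if (!entry)
			continue;

		/* A page mid-reclaim still needs its PCMD entry written. */
		if (entry->epc_page &&
		    (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
			reclaimed = 1;
			break;
		}
	}

	return reclaimed;
}
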
110 static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl, in sgx_encl_get_backing_page_pcmd_offset() argument
113 pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs); in sgx_encl_get_backing_page_pcmd_offset()
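
The offset helper at 110-113 encodes the backing-store layout: enclave page contents first, then a slot for the SECS, then the PCMD array. A sketch of the arithmetic, assuming the mainline struct names:

static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
							    unsigned long page_index)
{
	/* PCMD entries start after all page contents plus the SECS slot. */
	pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);

	return epc_end_off + page_index * sizeof(struct sgx_pcmd);
}
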
121 static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index) in sgx_encl_truncate_backing_page() argument
123 struct inode *inode = file_inode(encl->backing); in sgx_encl_truncate_backing_page()
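
Truncation at 121-123 releases a backing page to shmem once its contents have been restored into the EPC. A likely shape of the helper, consistent with the file_inode() hit:

static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl,
						  unsigned long page_index)
{
	struct inode *inode = file_inode(encl->backing);

	/* Punch out exactly one page of the shmem backing file. */
	shmem_truncate_range(inode, PFN_PHYS(page_index),
			     PFN_PHYS(page_index) + PAGE_SIZE - 1);
}
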
137 struct sgx_encl *encl = encl_page->encl; in __sgx_encl_eldu() local
147 page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); in __sgx_encl_eldu()
149 page_index = PFN_DOWN(encl->size); in __sgx_encl_eldu()
154 pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base; in __sgx_encl_eldu()
156 page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); in __sgx_encl_eldu()
158 ret = sgx_encl_lookup_backing(encl, page_index, &b); in __sgx_encl_eldu()
196 sgx_encl_truncate_backing_page(encl, page_index); in __sgx_encl_eldu()
198 if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) { in __sgx_encl_eldu()
199 sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off)); in __sgx_encl_eldu()
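
The __sgx_encl_eldu() hits (137-199) trace an ENCLS[ELDU] reload: pick the backing index, fetch the contents and PCMD pages, reload into the EPC, then truncate backing that is no longer needed. The index selection at 147-149 is the subtle part; sgx_encl_backing_index() below is a hypothetical helper written only to illustrate it:

static pgoff_t sgx_encl_backing_index(struct sgx_encl_page *encl_page,
				      struct sgx_epc_page *secs_page)
{
	struct sgx_encl *encl = encl_page->encl;

	/*
	 * A regular page is indexed by its offset from encl->base. When
	 * no SECS page is passed, the page being reloaded is the SECS
	 * itself, stored one slot past the last enclave page.
	 */
	if (secs_page)
		return PFN_DOWN(encl_page->desc - encl->base);

	return PFN_DOWN(encl->size);
}
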
216 struct sgx_encl *encl = encl_page->encl; in sgx_encl_eldu() local
231 list_move(&encl_page->va_page->list, &encl->va_pages); in sgx_encl_eldu()
238 static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl, in __sgx_encl_load_page() argument
251 if (!(encl->secs.epc_page)) { in __sgx_encl_load_page()
252 epc_page = sgx_encl_eldu(&encl->secs, NULL); in __sgx_encl_load_page()
257 epc_page = sgx_encl_eldu(entry, encl->secs.epc_page); in __sgx_encl_load_page()
261 encl->secs_child_cnt++; in __sgx_encl_load_page()
267 static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl, in sgx_encl_load_page_in_vma() argument
274 entry = xa_load(&encl->page_array, PFN_DOWN(addr)); in sgx_encl_load_page_in_vma()
286 return __sgx_encl_load_page(encl, entry); in sgx_encl_load_page_in_vma()
289 struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl, in sgx_encl_load_page() argument
294 entry = xa_load(&encl->page_array, PFN_DOWN(addr)); in sgx_encl_load_page()
298 return __sgx_encl_load_page(encl, entry); in sgx_encl_load_page()
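
sgx_encl_load_page() at 289-298 is the lookup entry point; the page_array xarray is keyed by PFN, hence the PFN_DOWN() on every lookup in this file. A sketch consistent with the hits:

struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
					 unsigned long addr)
{
	struct sgx_encl_page *entry;

	/* The xarray is indexed by PFN, so shift the address down first. */
	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}
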
315 struct sgx_encl *encl, unsigned long addr) in sgx_encl_eaug_page() argument
326 if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) in sgx_encl_eaug_page()
336 encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags); in sgx_encl_eaug_page()
340 mutex_lock(&encl->lock); in sgx_encl_eaug_page()
349 va_page = sgx_encl_grow(encl, false); in sgx_encl_eaug_page()
357 list_add(&va_page->list, &encl->va_pages); in sgx_encl_eaug_page()
359 ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc), in sgx_encl_eaug_page()
368 pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page); in sgx_encl_eaug_page()
376 encl_page->encl = encl; in sgx_encl_eaug_page()
379 encl->secs_child_cnt++; in sgx_encl_eaug_page()
390 mutex_unlock(&encl->lock); in sgx_encl_eaug_page()
393 mutex_unlock(&encl->lock); in sgx_encl_eaug_page()
397 xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc)); in sgx_encl_eaug_page()
400 sgx_encl_shrink(encl, va_page); in sgx_encl_eaug_page()
404 mutex_unlock(&encl->lock); in sgx_encl_eaug_page()
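
The EAUG hits (315-404) show page tracking being set up before the hardware is touched, with the error exits at 393-404 unwinding in reverse. sgx_encl_eaug_bookkeeping() below is a hypothetical, heavily condensed sketch of that pairing; EPC allocation and ENCLS[EAUG] itself are elided:

static vm_fault_t sgx_encl_eaug_bookkeeping(struct sgx_encl *encl,
					    struct sgx_encl_page *encl_page)
{
	struct sgx_va_page *va_page;
	int ret;

	mutex_lock(&encl->lock);

	/* Reserve a version-array slot before the page becomes visible. */
	va_page = sgx_encl_grow(encl, false);
	if (IS_ERR(va_page)) {
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}
	if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	if (ret) {
		/* Unwind in reverse order of setup. */
		sgx_encl_shrink(encl, va_page);
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}

	/* ... ENCLS[EAUG] runs here; on failure, xa_erase() then shrink ... */

	mutex_unlock(&encl->lock);
	return VM_FAULT_NOPAGE;
}
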
416 struct sgx_encl *encl; in sgx_vma_fault() local
419 encl = vma->vm_private_data; in sgx_vma_fault()
426 if (unlikely(!encl)) in sgx_vma_fault()
437 (!xa_load(&encl->page_array, PFN_DOWN(addr)))) in sgx_vma_fault()
438 return sgx_encl_eaug_page(vma, encl, addr); in sgx_vma_fault()
440 mutex_lock(&encl->lock); in sgx_vma_fault()
442 entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags); in sgx_vma_fault()
444 mutex_unlock(&encl->lock); in sgx_vma_fault()
456 mutex_unlock(&encl->lock); in sgx_vma_fault()
462 mutex_unlock(&encl->lock); in sgx_vma_fault()
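
The sgx_vma_fault() hits (416-462) cover the whole fault path: bail on a torn-down enclave, optionally EAUG a missing page on SGX2, otherwise load the page under encl->lock and map it. A sketch reconstructed from the hits, with comments and error paths condensed relative to the real function:

static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->address;
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl_page *entry;
	struct sgx_encl *encl;

	encl = vma->vm_private_data;

	/* NULL means sgx_encl_mm_add() failed in a fork; see sgx_vma_open(). */
	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	/* No page_array entry on SGX2 hardware: add the page via EAUG. */
	if (cpu_feature_enabled(X86_FEATURE_SGX2) &&
	    !xa_load(&encl->page_array, PFN_DOWN(addr)))
		return sgx_encl_eaug_page(vma, encl, addr);

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);
		/* -EBUSY: the reclaimer owns the page, let the fault retry. */
		return PTR_ERR(entry) == -EBUSY ? VM_FAULT_NOPAGE :
						  VM_FAULT_SIGBUS;
	}

	if (vmf_insert_pfn(vma, addr,
			   PFN_DOWN(sgx_get_epc_phys_addr(entry->epc_page))) !=
	    VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}

	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
	mutex_unlock(&encl->lock);

	return VM_FAULT_NOPAGE;
}
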
469 struct sgx_encl *encl = vma->vm_private_data; in sgx_vma_open() local
476 if (unlikely(!encl)) in sgx_vma_open()
479 if (sgx_encl_mm_add(encl, vma->vm_mm)) in sgx_vma_open()
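
The sgx_vma_open() hits (469-479) explain where the NULL vm_private_data checked by the fault handler comes from: if registering the new mm fails, the VMA is marked unusable. A sketch consistent with the hits:

static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/* Already NULL if an ancestor's fork failed to add its mm. */
	if (unlikely(!encl))
		return;

	/* On failure, poison the VMA so faults return SIGBUS. */
	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}
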
502 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, in sgx_encl_may_map() argument
510 XA_STATE(xas, &encl->page_array, PFN_DOWN(start)); in sgx_encl_may_map()
513 if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) && in sgx_encl_may_map()
514 (start < encl->base || end > encl->base + encl->size)) in sgx_encl_may_map()
524 mutex_lock(&encl->lock); in sgx_encl_may_map()
536 mutex_unlock(&encl->lock); in sgx_encl_may_map()
540 mutex_lock(&encl->lock); in sgx_encl_may_map()
545 mutex_unlock(&encl->lock); in sgx_encl_may_map()
556 static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page, in sgx_encl_debug_read() argument
570 static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page, in sgx_encl_debug_write() argument
586 static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl, in sgx_encl_reserve_page() argument
593 mutex_lock(&encl->lock); in sgx_encl_reserve_page()
595 entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags); in sgx_encl_reserve_page()
599 mutex_unlock(&encl->lock); in sgx_encl_reserve_page()
603 mutex_unlock(&encl->lock); in sgx_encl_reserve_page()
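
sgx_encl_reserve_page() at 586-603 wraps the load in a retry loop: -EBUSY means the reclaimer holds the page, so the lock is dropped and the load retried. Note the asymmetric locking: on success the function returns with encl->lock still held, which is why sgx_vma_access() unlocks at 657. A sketch of the pattern:

static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
						   unsigned long addr,
						   unsigned long vm_flags)
{
	struct sgx_encl_page *entry;

	for ( ; ; ) {
		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
		if (PTR_ERR(entry) != -EBUSY)
			break;

		/* The reclaimer owns the page; drop the lock and retry. */
		mutex_unlock(&encl->lock);
	}

	if (IS_ERR(entry))
		mutex_unlock(&encl->lock);

	/* On success, encl->lock is intentionally left held for the caller. */
	return entry;
}
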
611 struct sgx_encl *encl = vma->vm_private_data; in sgx_vma_access() local
624 if (!encl) in sgx_vma_access()
627 if (!test_bit(SGX_ENCL_DEBUG, &encl->flags)) in sgx_vma_access()
631 entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK, in sgx_vma_access()
643 ret = sgx_encl_debug_read(encl, entry, align, data); in sgx_vma_access()
649 ret = sgx_encl_debug_write(encl, entry, align, data); in sgx_vma_access()
657 mutex_unlock(&encl->lock); in sgx_vma_access()
682 struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); in sgx_encl_release() local
683 unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); in sgx_encl_release()
688 XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); in sgx_encl_release()
701 encl->secs_child_cnt--; in sgx_encl_release()
721 xa_destroy(&encl->page_array); in sgx_encl_release()
723 if (!encl->secs_child_cnt && encl->secs.epc_page) { in sgx_encl_release()
724 sgx_encl_free_epc_page(encl->secs.epc_page); in sgx_encl_release()
725 encl->secs.epc_page = NULL; in sgx_encl_release()
728 while (!list_empty(&encl->va_pages)) { in sgx_encl_release()
729 va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, in sgx_encl_release()
736 if (encl->backing) in sgx_encl_release()
737 fput(encl->backing); in sgx_encl_release()
739 cleanup_srcu_struct(&encl->srcu); in sgx_encl_release()
741 WARN_ON_ONCE(!list_empty(&encl->mm_list)); in sgx_encl_release()
744 WARN_ON_ONCE(encl->secs_child_cnt); in sgx_encl_release()
745 WARN_ON_ONCE(encl->secs.epc_page); in sgx_encl_release()
747 kfree(encl); in sgx_encl_release()
763 spin_lock(&encl_mm->encl->mm_lock); in sgx_mmu_notifier_release()
764 list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) { in sgx_mmu_notifier_release()
770 spin_unlock(&encl_mm->encl->mm_lock); in sgx_mmu_notifier_release()
773 synchronize_srcu(&encl_mm->encl->srcu); in sgx_mmu_notifier_release()
783 kref_put(&encl_mm->encl->refcount, sgx_encl_release); in sgx_mmu_notifier_free()
793 static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl, in sgx_encl_find_mm() argument
800 idx = srcu_read_lock(&encl->srcu); in sgx_encl_find_mm()
802 list_for_each_entry_rcu(tmp, &encl->mm_list, list) { in sgx_encl_find_mm()
809 srcu_read_unlock(&encl->srcu, idx); in sgx_encl_find_mm()
814 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm) in sgx_encl_mm_add() argument
831 if (sgx_encl_find_mm(encl, mm)) in sgx_encl_mm_add()
839 kref_get(&encl->refcount); in sgx_encl_mm_add()
840 encl_mm->encl = encl; in sgx_encl_mm_add()
850 spin_lock(&encl->mm_lock); in sgx_encl_mm_add()
851 list_add_rcu(&encl_mm->list, &encl->mm_list); in sgx_encl_mm_add()
854 encl->mm_list_version++; in sgx_encl_mm_add()
855 spin_unlock(&encl->mm_lock); in sgx_encl_mm_add()
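
The sgx_encl_mm_add() hits (814-855) show a new mm being published under mm_lock, with a version bump so concurrent PTE zapping can detect the change. A condensed sketch of that core (sgx_encl_mm_add_sketch() is a hypothetical wrapper; mmu_notifier registration is elided):

static int sgx_encl_mm_add_sketch(struct sgx_encl *encl, struct mm_struct *mm,
				  struct sgx_encl_mm *encl_mm)
{
	/* Take a reference that sgx_mmu_notifier_free() later drops. */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;

	/* ... mmu_notifier registration elided ... */

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Pairs with the smp_rmb() in sgx_zap_enclave_ptes(). */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}
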
903 const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl) in sgx_encl_cpumask() argument
905 cpumask_t *cpumask = &encl->cpumask; in sgx_encl_cpumask()
911 idx = srcu_read_lock(&encl->srcu); in sgx_encl_cpumask()
913 list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { in sgx_encl_cpumask()
922 srcu_read_unlock(&encl->srcu, idx); in sgx_encl_cpumask()
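
sgx_encl_cpumask() at 903-922 builds the set of CPUs that may hold enclave state by OR-ing the cpumask of every mm on the SRCU-protected list. A sketch consistent with the hits:

const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		/* Skip mms that are already exiting. */
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}
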
927 static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, in sgx_encl_get_backing_page() argument
930 struct address_space *mapping = encl->backing->f_mapping; in sgx_encl_get_backing_page()
949 static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, in __sgx_encl_get_backing() argument
952 pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); in __sgx_encl_get_backing()
956 contents = sgx_encl_get_backing_page(encl, page_index); in __sgx_encl_get_backing()
960 pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off)); in __sgx_encl_get_backing()
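
__sgx_encl_get_backing() at 949-960 pins both halves of a swapped-out page: the contents page and the PCMD page whose offset the layout helper computes. A sketch consistent with the hits (struct sgx_backing field names as in mainline):

static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
				  struct sgx_backing *backing)
{
	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	/* The PCMD page lives past the contents region; see the offset helper. */
	pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->contents = contents;
	backing->pcmd = pcmd;
	backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);

	return 0;
}
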
978 static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl) in sgx_encl_get_mem_cgroup() argument
996 idx = srcu_read_lock(&encl->srcu); in sgx_encl_get_mem_cgroup()
998 list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { in sgx_encl_get_mem_cgroup()
1009 srcu_read_unlock(&encl->srcu, idx); in sgx_encl_get_mem_cgroup()
1040 int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index, in sgx_encl_alloc_backing() argument
1043 struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl); in sgx_encl_alloc_backing()
1047 ret = __sgx_encl_get_backing(encl, page_index, backing); in sgx_encl_alloc_backing()
1072 static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index, in sgx_encl_lookup_backing() argument
1075 return __sgx_encl_get_backing(encl, page_index, backing); in sgx_encl_lookup_backing()
1117 struct sgx_encl *encl = page->encl; in sgx_encl_test_and_clear_young() local
1125 if (encl != vma->vm_private_data) in sgx_encl_test_and_clear_young()
1136 struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl, in sgx_encl_page_alloc() argument
1147 encl_page->desc = encl->base + offset; in sgx_encl_page_alloc()
1148 encl_page->encl = encl; in sgx_encl_page_alloc()
1177 void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr) in sgx_zap_enclave_ptes() argument
1185 mm_list_version = encl->mm_list_version; in sgx_zap_enclave_ptes()
1190 idx = srcu_read_lock(&encl->srcu); in sgx_zap_enclave_ptes()
1192 list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { in sgx_zap_enclave_ptes()
1199 if (!ret && encl == vma->vm_private_data) in sgx_zap_enclave_ptes()
1207 srcu_read_unlock(&encl->srcu, idx); in sgx_zap_enclave_ptes()
1208 } while (unlikely(encl->mm_list_version != mm_list_version)); in sgx_zap_enclave_ptes()
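
The sgx_zap_enclave_ptes() hits (1177-1208) show the other half of the mm_list_version protocol: snapshot the version, zap the page's PTEs in every registered mm under SRCU, and redo the walk if a new mm was added meanwhile. A sketch consistent with the hits:

void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
{
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with the smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));
}
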