On 12/1/21 11:23 AM, Reinette Chatre wrote:
+/*
+ * sgx_encl_eaug_page() - Dynamically add an EPC page to an initialized
+ * enclave in response to a page fault (EAUG flow).
+ * @vma:  VMA covering the faulting address.
+ * @encl: Enclave backing @vma.
+ * @addr: Faulting address. Stored unmasked in encl_page->desc and only
+ *        masked with PAGE_MASK when handed to EAUG, so it is presumably
+ *        already page-aligned by the fault handler -- TODO confirm.
+ *
+ * Return: VM_FAULT_NOPAGE on success (PTE installed), VM_FAULT_OOM if the
+ * encl_page allocation fails, VM_FAULT_SIGBUS on any other failure.
+ */
+static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
+ struct sgx_encl *encl, unsigned long addr)
+{
+ struct sgx_pageinfo pginfo = {0};
+ struct sgx_encl_page *encl_page;
+ struct sgx_epc_page *epc_page;
+ struct sgx_va_page *va_page;
+ unsigned long phys_addr;
+ unsigned long prot;
+ vm_fault_t vmret;
+ int ret;
+
+ /* Dynamic addition is only attempted on an initialized enclave. */
+ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return VM_FAULT_SIGBUS;
+
+ encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+ if (!encl_page)
+ return VM_FAULT_OOM;
+
+ /* desc doubles as the encl->page_array key via PFN_DOWN() below. */
+ encl_page->desc = addr;
+ encl_page->encl = encl;
+
+ /*
+ * Adding a regular page that is architecturally allowed to only
+ * be created with RW permissions.
+ * TBD: Interface with user space policy to support max permissions
+ * of RWX.
+ */
+ prot = PROT_READ | PROT_WRITE;
+ encl_page->vm_run_prot_bits = calc_vm_prot_bits(prot, 0);
+ encl_page->vm_max_prot_bits = encl_page->vm_run_prot_bits;
+
+ /* 'true' presumably permits reclaim during allocation -- TODO confirm. */
+ epc_page = sgx_alloc_epc_page(encl_page, true);
+ if (IS_ERR(epc_page)) {
+ kfree(encl_page);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /*
+ * Reserve Version Array space for the new page. sgx_encl_grow()
+ * presumably returns NULL when an existing VA page still has a free
+ * slot -- hence the NULL check before list_add() below; confirm.
+ */
+ va_page = sgx_encl_grow(encl);
+ if (IS_ERR(va_page)) {
+ ret = PTR_ERR(va_page);
+ goto err_out_free;
+ }
+
+ mutex_lock(&encl->lock);
+
+ /*
+ * Copy comment from sgx_encl_add_page() to maintain guidance in
+ * this similar flow:
+ * Adding to encl->va_pages must be done under encl->lock. Ditto for
+ * deleting (via sgx_encl_shrink()) in the error path.
+ */
+ if (va_page)
+ list_add(&va_page->list, &encl->va_pages);
+
+ ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
+ encl_page, GFP_KERNEL);
+ /*
+ * If ret == -EBUSY then page was created in another flow while
+ * running without encl->lock
+ * NOTE(review): that concurrent-add case also lands in SIGBUS via
+ * err_out_unlock; returning VM_FAULT_NOPAGE to retry the fault may
+ * be friendlier -- worth confirming the intended behavior.
+ */
+ if (ret)
+ goto err_out_unlock;
+
+ /* SECS and target linear address for ENCLS[EAUG]. */
+ pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
+ pginfo.addr = encl_page->desc & PAGE_MASK;
+ pginfo.metadata = 0;
+
+ ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
+ if (ret)
+ goto err_out;
+
+ /* NOTE(review): encl_page->encl was already set above; redundant. */
+ encl_page->encl = encl;
+ encl_page->epc_page = epc_page;
+ encl_page->type = SGX_PAGE_TYPE_REG;
+ encl->secs_child_cnt++;
+
+ /* Page is fully constructed; make it eligible for EPC reclaim. */
+ sgx_mark_page_reclaimable(encl_page->epc_page);
+
+ phys_addr = sgx_get_epc_phys_addr(epc_page);
+ /*
+ * Do not undo everything when creating PTE entry fails - next #PF
+ * would find page ready for a PTE.
+ * PAGE_SHARED because protection is forced to be RW above and COW
+ * is not supported.
+ */
+ vmret = vmf_insert_pfn_prot(vma, addr, PFN_DOWN(phys_addr),
+ PAGE_SHARED);
+ if (vmret != VM_FAULT_NOPAGE) {
+ mutex_unlock(&encl->lock);
+ return VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&encl->lock);
+ return VM_FAULT_NOPAGE;
+
+/* Unwind in reverse order of construction; all under encl->lock until
+ * err_out_free. */
+err_out:
+ xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
+
+err_out_unlock:
+ sgx_encl_shrink(encl, va_page);
+ mutex_unlock(&encl->lock);
+
+err_out_free:
+ sgx_encl_free_epc_page(epc_page);
+ kfree(encl_page);
+
+ return VM_FAULT_SIGBUS;
+}
There seems to be very little code sharing between this and the existing
page-addition flow, sgx_encl_add_page(). Are we confident that no
refactoring is in order here?