[PATCH mm-stable v2 1/3] mm/ksm: unmerge and clear VM_MERGEABLE when setting PR_SET_MEMORY_MERGE=0

From: David Hildenbrand
Date: Sat Apr 22 2023 - 16:56:09 EST


Let's unmerge any KSM pages when setting PR_SET_MEMORY_MERGE=0 and clear
the VM_MERGEABLE flag from all VMAs -- just like setting MADV_UNMERGEABLE
would. Of course, only do that if we previously set PR_SET_MEMORY_MERGE=1.

Acked-by: Stefan Roesch <shr@xxxxxxxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
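Not intended for the commit message -- just a quick userspace sketch of the
changed semantics, for reference. It assumes the PR_SET_MEMORY_MERGE (67) /
PR_GET_MEMORY_MERGE (68) prctl values from this series; with an up-to-date
<linux/prctl.h> the fallback defines can be dropped:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE	67
#define PR_GET_MEMORY_MERGE	68
#endif

int main(void)
{
        /* Opt the whole process into KSM merging. */
        if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
                perror("PR_SET_MEMORY_MERGE=1");

        /*
         * With this patch, switching the flag off again unmerges any
         * already merged pages and clears VM_MERGEABLE on all VMAs,
         * instead of only clearing MMF_VM_MERGE_ANY.
         */
        if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
                perror("PR_SET_MEMORY_MERGE=0");

        printf("merge-any: %d\n", prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));
        return 0;
}
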
include/linux/ksm.h | 1 +
kernel/sys.c | 12 +++------
mm/ksm.c | 59 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 63 insertions(+), 9 deletions(-)
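
The kerneldoc of ksm_disable_merge_any() below spells out the error
semantics: if unmerging fails, the prctl() fails and merging stays enabled
on all compatible VMAs. For completeness, a rough way to observe the
unmerge from userspace on kernels that expose
/proc/<pid>/ksm_merging_pages -- the helper is only part of this example,
not a kernel interface:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE	67
#endif

/* Read /proc/self/ksm_merging_pages; returns -1 if unavailable. */
static long ksm_merging_pages(void)
{
        FILE *f = fopen("/proc/self/ksm_merging_pages", "r");
        long val = -1;

        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        printf("merged before: %ld\n", ksm_merging_pages());

        if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
                /* Unmerging failed: merging on all VMAs stays enabled. */
                perror("PR_SET_MEMORY_MERGE=0");
        else
                printf("merged after:  %ld\n", ksm_merging_pages());

        return 0;
}

After a successful PR_SET_MEMORY_MERGE=0 the counter is expected to drop
back to 0, since every merged page of the process has been unmerged.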

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7a9b76fb6c3f..429efa6ff4ae 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -21,6 +21,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
diff --git a/kernel/sys.c b/kernel/sys.c
index 72cdb16e2636..339fee3eff6a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2695,16 +2695,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                 if (mmap_write_lock_killable(me->mm))
                         return -EINTR;

-                if (arg2) {
+                if (arg2)
                         error = ksm_enable_merge_any(me->mm);
-                } else {
-                        /*
-                         * TODO: we might want disable KSM on all VMAs and
-                         * trigger unsharing to completely disable KSM.
-                         */
-                        clear_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
-                        error = 0;
-                }
+                else
+                        error = ksm_disable_merge_any(me->mm);
                 mmap_write_unlock(me->mm);
                 break;
         case PR_GET_MEMORY_MERGE:
diff --git a/mm/ksm.c b/mm/ksm.c
index 9e48258985d2..823bb3475a68 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2520,6 +2520,22 @@ static void __ksm_add_vma(struct vm_area_struct *vma)
                 vm_flags_set(vma, VM_MERGEABLE);
 }

+static int __ksm_del_vma(struct vm_area_struct *vma)
+{
+        int err;
+
+        if (!(vma->vm_flags & VM_MERGEABLE))
+                return 0;
+
+        if (vma->anon_vma) {
+                err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+                if (err)
+                        return err;
+        }
+
+        vm_flags_clear(vma, VM_MERGEABLE);
+        return 0;
+}
 /**
  * ksm_add_vma - Mark vma as mergeable if compatible
  *
@@ -2542,6 +2558,20 @@ static void ksm_add_vmas(struct mm_struct *mm)
                 __ksm_add_vma(vma);
 }

+static int ksm_del_vmas(struct mm_struct *mm)
+{
+        struct vm_area_struct *vma;
+        int err;
+
+        VMA_ITERATOR(vmi, mm, 0);
+        for_each_vma(vmi, vma) {
+                err = __ksm_del_vma(vma);
+                if (err)
+                        return err;
+        }
+        return 0;
+}
+
/**
* ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
* compatible VMA's
@@ -2569,6 +2599,35 @@ int ksm_enable_merge_any(struct mm_struct *mm)
         return 0;
 }

+/**
+ * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
+ *                         previously enabled via ksm_enable_merge_any().
+ *
+ * Disabling merging implies unmerging any merged pages, like setting
+ * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
+ * merging on all compatible VMA's remains enabled.
+ *
+ * @mm: Pointer to mm
+ *
+ * Returns 0 on success, otherwise error code
+ */
+int ksm_disable_merge_any(struct mm_struct *mm)
+{
+        int err;
+
+        if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+                return 0;
+
+        err = ksm_del_vmas(mm);
+        if (err) {
+                ksm_add_vmas(mm);
+                return err;
+        }
+
+        clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+        return 0;
+}
+
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 unsigned long end, int advice, unsigned long *vm_flags)
 {
--
2.40.0