[PATCH] fold me "mm, oom: fortify task_will_free_mem"

From: Michal Hocko
Date: Tue May 31 2016 - 01:33:06 EST


As per Oleg
- uninline task_will_free_mem
- reorganize checks and simplify __task_will_free_mem
- add missing process_shares_mm in task_will_free_mem
- add same_thread_group to task_will_free_mem for clarity

Signed-off-by: Michal Hocko <mhocko@xxxxxxxx>
---
include/linux/oom.h | 64 +++++------------------------------------------------
mm/oom_kill.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 62 insertions(+), 58 deletions(-)

diff --git a/include/linux/oom.h b/include/linux/oom.h
index c4cc0591d959..f3ac9d088645 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -119,69 +119,17 @@ static inline bool __task_will_free_mem(struct task_struct *task)
if (sig->flags & SIGNAL_GROUP_COREDUMP)
return false;

- if (!(task->flags & PF_EXITING || fatal_signal_pending(task)))
- return false;
-
- /* Make sure that the whole thread group is going down */
- if (!thread_group_empty(task) &&
- !(sig->flags & SIGNAL_GROUP_EXIT || fatal_signal_pending(task)))
- return false;
-
- return true;
-}
-
-/*
- * Checks whether the given task is dying or exiting and likely to
- * release its address space. This means that all threads and processes
- * sharing the same mm have to be killed or exiting.
- */
-static inline bool task_will_free_mem(struct task_struct *task)
-{
- struct mm_struct *mm = NULL;
- struct task_struct *p;
- bool ret;
-
- /*
- * If the process has passed exit_mm we have to skip it because
- * we have lost a link to other tasks sharing this mm, we do not
- * have anything to reap and the task might then get stuck waiting
- * for parent as zombie and we do not want it to hold TIF_MEMDIE
- */
- p = find_lock_task_mm(task);
- if (!p)
- return false;
-
- if (!__task_will_free_mem(p)) {
- task_unlock(p);
- return false;
- }
-
- mm = p->mm;
- if (atomic_read(&mm->mm_users) <= 1) {
- task_unlock(p);
+ if (sig->flags & SIGNAL_GROUP_EXIT)
return true;
- }

- /* pin the mm to not get freed and reused */
- atomic_inc(&mm->mm_count);
- task_unlock(p);
+ if (thread_group_empty(task) && (task->flags & PF_EXITING))
+ return true;

- /*
- * This is really pessimistic but we do not have any reliable way
- * to check that external processes share with our mm
- */
- rcu_read_lock();
- for_each_process(p) {
- ret = __task_will_free_mem(p);
- if (!ret)
- break;
- }
- rcu_read_unlock();
- mmdrop(mm);
-
- return ret;
+ return false;
}

+bool task_will_free_mem(struct task_struct *task);
+
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0b7c02869bc0..aa28315ac310 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -697,6 +697,62 @@ void oom_killer_enable(void)
}

/*
+ * Checks whether the given task is dying or exiting and likely to
+ * release its address space. This means that all threads and processes
+ * sharing the same mm have to be killed or exiting.
+ */
+bool task_will_free_mem(struct task_struct *task)
+{
+ struct mm_struct *mm = NULL;
+ struct task_struct *p;
+ bool ret = true;
+
+ /*
+ * If the process has passed exit_mm we have to skip it because
+ * we have lost a link to other tasks sharing this mm, we do not
+ * have anything to reap and the task might then get stuck waiting
+ * for parent as zombie and we do not want it to hold TIF_MEMDIE
+ */
+ p = find_lock_task_mm(task);
+ if (!p)
+ return false;
+
+ if (!__task_will_free_mem(p)) {
+ task_unlock(p);
+ return false;
+ }
+
+ mm = p->mm;
+ if (atomic_read(&mm->mm_users) <= 1) {
+ task_unlock(p);
+ return true;
+ }
+
+ /* pin the mm to not get freed and reused */
+ atomic_inc(&mm->mm_count);
+ task_unlock(p);
+
+ /*
+ * This is really pessimistic but we do not have any reliable way
+ * to check that external processes share with our mm
+ */
+ rcu_read_lock();
+ for_each_process(p) {
+ if (!process_shares_mm(p, mm))
+ continue;
+ if (same_thread_group(task, p))
+ continue;
+ ret = __task_will_free_mem(p);
+ if (!ret)
+ break;
+ }
+ rcu_read_unlock();
+ mmdrop(mm);
+
+ return ret;
+}
+
+/*
* Must be called while holding a reference to p, which will be released upon
* returning.
*/
--
2.8.1

--
Michal Hocko
SUSE Labs