[PATCH 4/7] kprobes: Keep consistent state of kprobes_all_disarmed

From: Petr Mladek
Date: Thu Feb 26 2015 - 11:13:26 EST


The kprobes_all_disarmed global flag says that all Kprobes are disarmed
even when the Kprobe-specific KPROBE_FLAG_DISABLED flag is not set.

The global flag is currently updated by the arm_all_kprobes() and
disarm_all_kprobes() functions even when they were not able to switch
all Kprobes. This might result in further errors.

This patch tries to restore a consistent state when some particular
Kprobe cannot be (dis)armed. In this case, it reverts the already
switched Kprobes to their previous state.

The implementation moves the loops that modify all the probes into
separate helper functions, so that they can be reused to restore the
original state; see the simplified sketch below.

The kprobes_all_disarmed flag is now modified only when all Kprobes
were successfully switched.

In case of an error, wait_for_kprobe_optimizer() is now called also in
arm_all_kprobes() to be on the safe side.
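
For illustration only, here is a minimal sketch of the arm-then-roll-back
pattern used here. The names struct entry, arm_one() and disarm_one() are
hypothetical stand-ins for the real kprobe helpers; the code in the diff
below walks the kprobe hash table and stops at the (last_table, last_kprobe)
position instead of an array index:

	/* Arm entries one by one; on failure, undo the ones already armed. */
	static int arm_with_rollback(struct entry *entries, int count)
	{
		int i, err;

		for (i = 0; i < count; i++) {
			err = arm_one(&entries[i]);
			if (!err)
				continue;
			/* Restore the previous consistent state. */
			while (i-- > 0)
				disarm_one(&entries[i]);
			return err;
		}

		/* Only now may the caller update the global flag. */
		return 0;
	}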

Signed-off-by: Petr Mladek <pmladek@xxxxxxx>
---
kernel/kprobes.c | 124 ++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 95 insertions(+), 29 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ba57147bd52c..1fcb19095b43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2353,44 +2353,117 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
.release = seq_release,
};

-static int arm_all_kprobes(void)
+static int __disarm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe);
+
+/*
+ * Arm all enabled Kprobes until the one given by the parameters is reached.
+ * On error, it tries to restore the original state.
+ *
+ * All Kprobes are handled when @last_table == KPROBE_TABLE_SIZE - 1
+ * and @last_kprobe == NULL.
+ *
+ * This function needs to be called with kprobe_mutex held.
+ */
+static int __arm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
- int err, ret = 0;
+ int err;

- mutex_lock(&kprobe_mutex);
-
- /* If kprobes are armed, just return */
- if (!kprobes_all_disarmed)
- goto already_enabled;
-
/* Arming kprobes doesn't optimize kprobe itself */
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ for (i = 0; i <= last_table; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (i == last_table && p == last_kprobe)
+ return 0;
if (!kprobe_disabled(p)) {
err = arm_kprobe(p);
- if (err)
- ret = err;
+ if (!err)
+ continue;
+ /*
+ * Try to restore the original consistent state.
+ * Do this only when this call processes all Kprobes
+ * (the top-level call), to avoid an infinite loop.
+ */
+ if (!last_kprobe)
+ WARN_ON(__disarm_all_kprobes(i, p));
+ return err;
}
+ }
}

- kprobes_all_disarmed = false;
- printk(KERN_INFO "Kprobes globally enabled\n");
+ return 0;
+}
+
+static int arm_all_kprobes(void)
+{
+ int ret = 0;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* If kprobes are armed, just return */
+ if (!kprobes_all_disarmed) {
+ mutex_unlock(&kprobe_mutex);
+ return 0;
+ }
+
+ ret = __arm_all_kprobes(KPROBE_TABLE_SIZE - 1, NULL);
+ if (!ret) {
+ kprobes_all_disarmed = false;
+ pr_info("Kprobes globally enabled\n");
+ }

-already_enabled:
mutex_unlock(&kprobe_mutex);
+
+ /*
+ * On error, some Kprobes were armed and then disarmed again. To be on
+ * the safe side, wait until the optimizer has disarmed them all.
+ */
+ if (ret)
+ wait_for_kprobe_optimizer();
+
return ret;
}

-static int disarm_all_kprobes(void)
+/* Reverse operation of __arm_all_kprobes(); see above for details. */
+static int __disarm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
- int err, ret = 0;
+ int err;
+
+ for (i = 0; i <= last_table; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (i == last_table && p == last_kprobe)
+ return 0;
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+ err = disarm_kprobe(p, false);
+ if (!err)
+ continue;
+ /*
+ * Try to restore the original consistent state.
+ * Do this only when this call processes all Kprobes
+ * (the top-level call), to avoid an infinite loop.
+ */
+ if (!last_kprobe)
+ WARN_ON(__arm_all_kprobes(i, p));
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int disarm_all_kprobes(void)
+{
+ int ret;

mutex_lock(&kprobe_mutex);

@@ -2400,24 +2473,17 @@ static int disarm_all_kprobes(void)
return 0;
}

- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
- head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
- err = disarm_kprobe(p, false);
- if (err)
- ret = err;
- }
- }
+ ret = __disarm_all_kprobes(KPROBE_TABLE_SIZE - 1, NULL);
+ if (!ret) {
+ kprobes_all_disarmed = true;
+ pr_info("Kprobes globally disabled\n");
}

- kprobes_all_disarmed = true;
- pr_info("Kprobes globally disabled\n");
-
mutex_unlock(&kprobe_mutex);

/* Wait for disarming all kprobes by optimizer */
- wait_for_kprobe_optimizer();
+ if (!ret)
+ wait_for_kprobe_optimizer();

return ret;
}
--
1.8.5.6
