[PATCH 2/7] kprobes: Propagate error from arm_kprobe_ftrace()

From: Petr Mladek
Date: Thu Feb 26 2015 - 11:15:37 EST


arm_kprobe_ftrace() could fail, especially since the introduction of the ftrace
IPMODIFY flag and livepatching.

register_kprobe() and register_aggr_kprobe() ignore the error because the
kprobe gets disabled on failure and they keep it registered.

But enable_kprobe() should propagate the error because its task fails
when ftrace fails.
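
Just for illustration, a minimal module-style sketch of a caller that can now
see the failure (the probed symbol and the module boilerplate are made up for
the example, not part of this patch):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe demo_kp = {
        .symbol_name = "do_sys_open",   /* illustrative target */
    };

    static int __init demo_init(void)
    {
        int ret;

        ret = register_kprobe(&demo_kp);
        if (ret)
            return ret;

        disable_kprobe(&demo_kp);

        /* With this patch, a failure to re-arm via ftrace shows up here. */
        ret = enable_kprobe(&demo_kp);
        if (ret) {
            pr_err("enable_kprobe failed: %d\n", ret);
            unregister_kprobe(&demo_kp);
            return ret;
        }
        return 0;
    }

    static void __exit demo_exit(void)
    {
        unregister_kprobe(&demo_kp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");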

Also, arm_all_kprobes() should return an error if one occurs. The behavior
is a bit questionable here. This patch keeps the existing best-effort
behavior: it tries to enable as many kprobes as possible and returns only
the last error code, if any. kprobes_all_disarmed is always cleared and
the message about the finished action is always printed. A separate patch
will improve this behavior.
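
Again only for illustration, a small userspace sketch (assuming debugfs is
mounted at /sys/kernel/debug) of how the propagated error becomes visible as
a failed write to the "enabled" file:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* With this patch, a failed arm_all_kprobes() makes this write fail. */
        if (write(fd, "1", 1) != 1)
            perror("write");

        close(fd);
        return 0;
    }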

Signed-off-by: Petr Mladek <pmladek@xxxxxxx>
---
kernel/kprobes.c | 47 ++++++++++++++++++++++++++++++++---------------
1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d1b9db690b9c..a69d23add983 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -929,7 +929,7 @@ static int prepare_kprobe(struct kprobe *p)
}

/* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
{
struct kprobe *kp;
int ret;
@@ -949,7 +949,7 @@ static void arm_kprobe_ftrace(struct kprobe *p)
goto err_function;
}
kprobe_ftrace_enabled++;
- return;
+ return ret;

err_function:
ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
@@ -958,6 +958,7 @@ err_filter:
if (kprobe_aggrprobe(p))
list_for_each_entry_rcu(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_DISABLED;
+ return ret;
}

/* Caller must lock kprobe_mutex */
@@ -976,17 +977,15 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p) do {} while (0)
+#define arm_kprobe_ftrace(p) (0)
#define disarm_kprobe_ftrace(p) do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
{
- if (unlikely(kprobe_ftrace(kp))) {
- arm_kprobe_ftrace(kp);
- return;
- }
+ if (unlikely(kprobe_ftrace(kp)))
+ return arm_kprobe_ftrace(kp);
/*
* Here, since __arm_kprobe() doesn't use stop_machine(),
* this doesn't cause deadlock on text_mutex. So, we don't
@@ -995,6 +994,7 @@ static void arm_kprobe(struct kprobe *kp)
mutex_lock(&text_mutex);
__arm_kprobe(kp);
mutex_unlock(&text_mutex);
+ return 0;
}

/* Disarm a kprobe with text_mutex */
@@ -1332,10 +1332,15 @@ out:
put_online_cpus();
jump_label_unlock();

+ /* Arm when this is the first enabled kprobe at this address */
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed)
- /* Arm the breakpoint again. */
+ /*
+ * The kprobe is disabled and a warning is printed
+ * on error. But we ignore the error code here
+ * because we keep it registered.
+ */
arm_kprobe(ap);
}
return ret;
@@ -1540,6 +1545,11 @@ int register_kprobe(struct kprobe *p)
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

if (!kprobes_all_disarmed && !kprobe_disabled(p))
+ /*
+ * The kprobe is disabled and a warning is printed on error.
+ * But we ignore the error code here because we keep it
+ * registered.
+ */
arm_kprobe(p);

/* Try to optimize kprobe */
@@ -2040,7 +2050,7 @@ int enable_kprobe(struct kprobe *kp)

if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED;
- arm_kprobe(p);
+ ret = arm_kprobe(p);
}
out:
mutex_unlock(&kprobe_mutex);
@@ -2325,11 +2335,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
.release = seq_release,
};

-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
+ int err, ret = 0;

mutex_lock(&kprobe_mutex);

@@ -2341,8 +2352,11 @@ static void arm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist)
- if (!kprobe_disabled(p))
- arm_kprobe(p);
+ if (!kprobe_disabled(p)) {
+ err = arm_kprobe(p);
+ if (err)
+ ret = err;
+ }
}

kprobes_all_disarmed = false;
@@ -2350,7 +2364,7 @@ static void arm_all_kprobes(void)

already_enabled:
mutex_unlock(&kprobe_mutex);
- return;
+ return ret;
}

static void disarm_all_kprobes(void)
@@ -2407,6 +2421,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
{
char buf[32];
size_t buf_size;
+ int err = 0;

buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -2417,7 +2432,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- arm_all_kprobes();
+ err = arm_all_kprobes();
break;
case 'n':
case 'N':
@@ -2428,6 +2443,8 @@ static ssize_t write_enabled_file_bool(struct file *file,
return -EINVAL;
}

+ if (err)
+ return err;
return count;
}

--
1.8.5.6
