[PATCH 07/15] perf: convert perf_event_context.refcount to refcount_t

From: Elena Reshetova
Date: Wed Aug 30 2017 - 08:26:07 EST


atomic_t variables are currently used to implement reference
counters with the following properties:
- the counter is initialized to 1 using atomic_set()
- a resource is freed once the counter reaches zero
- once the counter reaches zero, further increments
  aren't allowed
- the counter schema uses basic atomic operations
  (set, inc, inc_not_zero, dec_and_test, etc.)
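
For illustration only (not part of this patch), the scheme above,
applied to a hypothetical "struct foo", looks roughly like this:

	/* needs <linux/atomic.h> and <linux/slab.h> */
	struct foo {
		atomic_t refcount;
		/* ... payload ... */
	};

	static void foo_init(struct foo *f)
	{
		atomic_set(&f->refcount, 1);	/* counter starts at 1 */
	}

	static bool foo_get(struct foo *f)
	{
		/* increments from zero must fail: the object is gone */
		return atomic_inc_not_zero(&f->refcount);
	}

	static void foo_put(struct foo *f)
	{
		/* free the resource when the last reference is dropped */
		if (atomic_dec_and_test(&f->refcount))
			kfree(f);
	}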

Such atomic variables should be converted to the new refcount_t
type and API, which prevents accidental counter overflows and
underflows. This is important since overflows and underflows can
lead to use-after-free situations and be exploitable.

The variable perf_event_context.refcount is used as a pure
reference counter. Convert it to refcount_t and fix up the
operations.
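
On the hypothetical example above, the conversion is mechanical
(again a sketch, not part of this patch):

	struct foo {
		refcount_t refcount;	/* <linux/refcount.h> */
	};

	refcount_set(&f->refcount, 1);		/* was atomic_set() */
	refcount_inc_not_zero(&f->refcount);	/* was atomic_inc_not_zero() */
	refcount_dec_and_test(&f->refcount);	/* was atomic_dec_and_test() */

With the checked refcount implementation, the counter saturates
instead of wrapping on overflow and refuses to increment from
zero, emitting a WARN rather than silently corrupting the count.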

Suggested-by: Kees Cook <keescook@xxxxxxxxxxxx>
Reviewed-by: David Windsor <dwindsor@xxxxxxxxx>
Reviewed-by: Hans Liljestrand <ishkamiel@xxxxxxxxx>
Signed-off-by: Elena Reshetova <elena.reshetova@xxxxxxxxx>
---
include/linux/perf_event.h |  3 ++-
kernel/events/core.c       | 12 ++++++------
2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9bac4bf..75c11181 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/refcount.h>
#include <asm/local.h>

struct perf_callchain_entry {
@@ -737,7 +738,7 @@ struct perf_event_context {
int nr_stat;
int nr_freq;
int rotate_disable;
- atomic_t refcount;
+ refcount_t refcount;
struct task_struct *task;

/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 852768d..57ef7ce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1109,7 +1109,7 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)

static void get_ctx(struct perf_event_context *ctx)
{
- WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+ WARN_ON(!refcount_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
@@ -1123,7 +1123,7 @@ static void free_ctx(struct rcu_head *head)

static void put_ctx(struct perf_event_context *ctx)
{
- if (atomic_dec_and_test(&ctx->refcount)) {
+ if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
@@ -1201,7 +1201,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
again:
rcu_read_lock();
ctx = ACCESS_ONCE(event->ctx);
- if (!atomic_inc_not_zero(&ctx->refcount)) {
+ if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
@@ -1329,7 +1329,7 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
}

if (ctx->task == TASK_TOMBSTONE ||
- !atomic_inc_not_zero(&ctx->refcount)) {
+ !refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
@@ -3795,7 +3795,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
- atomic_set(&ctx->refcount, 1);
+ refcount_set(&ctx->refcount, 1);
}

static struct perf_event_context *
@@ -9812,7 +9812,7 @@ __perf_event_ctx_lock_double(struct perf_event *group_leader,
again:
rcu_read_lock();
gctx = READ_ONCE(group_leader->ctx);
- if (!atomic_inc_not_zero(&gctx->refcount)) {
+ if (!refcount_inc_not_zero(&gctx->refcount)) {
rcu_read_unlock();
goto again;
}
--
2.7.4