Re: [PATCH v8 06/14] lockdep: Detect and handle hist_lock ring buffer overwrite

From: Boqun Feng
Date: Thu Aug 10 2017 - 09:17:30 EST


On Thu, Aug 10, 2017 at 08:51:33PM +0800, Boqun Feng wrote:
[...]
> > > > + /* Check if the ring was overwritten. */
> > > > + if (h->hist_id != cur->hist_id_save[c])
> > >
> > > Could we use:
> > >
> > > if (h->hist_id != idx)
> >
> > No, we cannot.
> >
>
> Hey, I'm not buying it. task_struct::hist_id and task_struct::xhlock_idx
> are increased at the same place (in add_xhlock()), right?
>
> And, yes, xhlock_idx will get decreased when we do ring-buffer
> unwinding, but that's OK, because we need to throw away those recently
> added items.
>
> And xhlock_idx always points to the most recently added valid item,
> right? Any other item's idx must "before()" the most recently added
> one's, right? So ::xhlock_idx acts just like a timestamp, doesn't it?
>
> Maybe I'm missing something subtle, but could you show me an example,
> that could end up being a problem if we use xhlock_idx as the hist_id?
>
> > hist_id is a kind of timestamp and is used to detect overwriting
> > data into places with the same index in the ring buffer. And idx is
> > just an index. :) IOW, they mean different things.
> >
> > >
> > > here, and
> > >
> > > > + invalidate_xhlock(h);
> > > > + }
> > > > }
> > > >
> > > > static int cross_lock(struct lockdep_map *lock)
> > > > @@ -4826,6 +4851,7 @@ static inline int depend_after(struct held_lock *hlock)
> > > > * Check if the xhlock is valid, which would be false if,
> > > > *
> > > > * 1. Has not used after initialization yet.
> > > > + * 2. Got invalidated.
> > > > *
> > > > * Remind hist_lock is implemented as a ring buffer.
> > > > */
> > > > @@ -4857,6 +4883,7 @@ static void add_xhlock(struct held_lock *hlock)
> > > >
> > > > /* Initialize hist_lock's members */
> > > > xhlock->hlock = *hlock;
> > > > + xhlock->hist_id = current->hist_id++;
>
> Besides, is this code correct? Does this just make xhlock->hist_id
> one less than current->hist_id, which causes the invalidation every
> time you do ring buffer unwinding?
>
> Regards,
> Boqun
>
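
To make the arithmetic I'm asking about concrete, here is a quick userspace
sketch (not kernel code; the stripped-down struct and helper names are mine,
only the per-entry hist_id is modelled). If I read the quoted hunks right,
add_xhlock() stores the timestamp with a post-increment while
crossrelease_hist_start() snapshots the counter itself, so the entry the
snapshot points at is always one behind hist_id_save even when nothing was
overwritten:

/*
 * Userspace sketch only -- mimics the quoted hunks with made-up names,
 * everything except the per-entry hist_id is left out.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_XHLOCKS_NR	8

struct fake_task {
	unsigned int xhlock_idx;
	unsigned int hist_id;
	unsigned int hist_id_save;
	unsigned int ring[MAX_XHLOCKS_NR];	/* stands in for xhlock(i).hist_id */
};

/* Mimics add_xhlock(): store the timestamp, then bump the counter. */
static void fake_add_xhlock(struct fake_task *t)
{
	unsigned int idx = ++t->xhlock_idx;

	t->ring[idx % MAX_XHLOCKS_NR] = t->hist_id++;	/* post-increment */
}

int main(void)
{
	struct fake_task t = { .xhlock_idx = (unsigned int)-1 };

	fake_add_xhlock(&t);			/* some history exists already */

	/* crossrelease_hist_start(): snapshot the index and the counter. */
	unsigned int saved_idx = t.xhlock_idx;
	t.hist_id_save = t.hist_id;

	/* ... the context runs, nothing in the ring gets overwritten ... */

	/* crossrelease_hist_end(): look at the entry at the saved index. */
	unsigned int h_hist_id = t.ring[saved_idx % MAX_XHLOCKS_NR];

	printf("h->hist_id = %u, hist_id_save = %u\n", h_hist_id, t.hist_id_save);

	/* Always differs by one, so the still-valid entry gets invalidated. */
	assert(h_hist_id == t.hist_id_save - 1);
	return 0;
}

Run it and the assert holds without any wraparound at all, which is why I
suspect the overwrite check above fires on perfectly valid entries.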

So basically, I'm suggesting doing this on top of your patch. There is
also a fix in commit_xhlocks(): I think you should swap the parameters
of before(...), no matter whether task_struct::hist_id or
task_struct::xhlock_idx is used as the timestamp.
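
To show why I think task_struct::xhlock_idx alone works as the timestamp,
and why the before() arguments have to be the other way around, here is the
same kind of userspace sketch (again only the per-entry hist_id is modelled,
the helper names are mine, and the ring is made tiny on purpose): the ring
partially wraps in an irq/work context, crossrelease_hist_end() unwinds the
index, and the backwards walk in commit_xhlocks() stops at the overwritten
slot only when prev_hist_id starts at cur + 1 and the arguments are swapped:

/* Userspace sketch only -- mimics the diff at the end of this mail. */
#include <stdio.h>

#define MAX_XHLOCKS_NR	4	/* tiny ring so the demo stays short */

static unsigned int xhlock_idx = (unsigned int)-1;
static unsigned int ring[MAX_XHLOCKS_NR];	/* stands in for xhlock(i).hist_id */

/* Same wrap-around-safe comparison idea as the gen_id before(). */
static int before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

/* Mimics add_xhlock() with xhlock->hist_id = idx. */
static void fake_add_xhlock(void)
{
	unsigned int idx = ++xhlock_idx;

	ring[idx % MAX_XHLOCKS_NR] = idx;
}

int main(void)
{
	unsigned int i;

	/* The task context fills the ring: hist_id 0..3 land in slots 0..3. */
	for (i = 0; i < MAX_XHLOCKS_NR; i++)
		fake_add_xhlock();

	/* crossrelease_hist_start(): snapshot the index. */
	unsigned int saved_idx = xhlock_idx;	/* == 3 */

	/* An irq/work context adds two more entries, overwriting slots 0 and 1. */
	fake_add_xhlock();
	fake_add_xhlock();

	/*
	 * crossrelease_hist_end(): unwind back to the snapshot.  The entry
	 * at the saved index still has hist_id == idx, so it stays valid.
	 */
	xhlock_idx = saved_idx;

	/* commit_xhlocks(): walk backwards from the (restored) index. */
	unsigned int cur = xhlock_idx;
	unsigned int prev_hist_id = cur + 1;

	for (i = 0; i < MAX_XHLOCKS_NR; i++) {
		unsigned int hist_id = ring[(cur - i) % MAX_XHLOCKS_NR];

		/*
		 * Walking backwards, hist_id must never increase.  If the
		 * newer entry we just visited is before() this one, this
		 * slot was overwritten by the irq/work context -> stop.
		 *
		 * Note: with the un-swapped before(hist_id, prev_hist_id)
		 * the loop would already break at i == 0 and throw away
		 * perfectly valid history.
		 */
		if (before(prev_hist_id, hist_id)) {
			printf("overwrite detected %u entries back (hist_id=%u)\n",
			       i, hist_id);
			break;
		}

		prev_hist_id = hist_id;
	}

	return 0;
}

The idea is that a valid entry at ring position idx always has hist_id ==
idx, while an overwritten one has hist_id bumped by some multiple of
MAX_XHLOCKS_NR, so any increase in hist_id while walking backwards means the
slot was reused; and prev_hist_id starting at cur + 1 lets a valid newest
entry pass while still catching an overwritten one.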

Hope this makes my point clearer, and if I do miss something, please
point it out, thanks ;-)

Regards,
Boqun
------------>8

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 074872f016f8..886ba79bfc38 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -854,9 +854,6 @@ struct task_struct {
 	unsigned int xhlock_idx;
 	/* For restoring at history boundaries */
 	unsigned int xhlock_idx_hist[XHLOCK_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_NR];
 #endif
 
 #ifdef CONFIG_UBSAN
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 699fbeab1920..04c6c8d68e18 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4752,10 +4752,8 @@ void crossrelease_hist_start(enum xhlock_context_t c)
 {
 	struct task_struct *cur = current;
 
-	if (cur->xhlocks) {
+	if (cur->xhlocks)
 		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-		cur->hist_id_save[c] = cur->hist_id;
-	}
 }
 
 void crossrelease_hist_end(enum xhlock_context_t c)
@@ -4769,7 +4767,7 @@ void crossrelease_hist_end(enum xhlock_context_t c)
 		cur->xhlock_idx = idx;
 
 		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
+		if (h->hist_id != idx)
 			invalidate_xhlock(h);
 	}
 }
@@ -4849,7 +4847,7 @@ static void add_xhlock(struct held_lock *hlock)

 	/* Initialize hist_lock's members */
 	xhlock->hlock = *hlock;
-	xhlock->hist_id = current->hist_id++;
+	xhlock->hist_id = idx;
 
 	xhlock->trace.nr_entries = 0;
 	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -5005,7 +5003,7 @@ static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
 static void commit_xhlocks(struct cross_lock *xlock)
 {
 	unsigned int cur = current->xhlock_idx;
-	unsigned int prev_hist_id = xhlock(cur).hist_id;
+	unsigned int prev_hist_id = cur + 1;
 	unsigned int i;
 
 	if (!graph_lock())
@@ -5030,7 +5028,7 @@ static void commit_xhlocks(struct cross_lock *xlock)
 		 * hist_id than the following one, which is impossible
 		 * otherwise.
 		 */
-		if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+		if (unlikely(before(prev_hist_id, xhlock->hist_id)))
 			break;
 
 		prev_hist_id = xhlock->hist_id;
@@ -5120,12 +5118,9 @@ void lockdep_init_task(struct task_struct *task)
 	int i;
 
 	task->xhlock_idx = UINT_MAX;
-	task->hist_id = 0;
 
-	for (i = 0; i < XHLOCK_NR; i++) {
+	for (i = 0; i < XHLOCK_NR; i++)
 		task->xhlock_idx_hist[i] = UINT_MAX;
-		task->hist_id_save[i] = 0;
-	}
 
 	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
 				GFP_KERNEL);