[PATCH 2/6] locking/lockdep: Mark local_lock_t

From: Peter Zijlstra
Date: Thu Dec 10 2020 - 09:54:19 EST


The local_lock_t's are special because they cannot form IRQ
inversions; make sure we can tell them apart from the rest of the
locks.
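
For illustration (not part of this patch), a typical local_lock user
looks roughly like the sketch below; struct foo_pcpu and foo_inc()
are made-up names. The lock only protects the owning CPU's data,
which is why it cannot take part in the IRQ inversions mentioned
above, and with this patch its dep_map carries LD_LOCK_PERCPU via
LL_DEP_MAP_INIT() / local_lock_init():

  #include <linux/local_lock.h>
  #include <linux/percpu.h>

  /* Hypothetical per-CPU data protected by a local_lock_t. */
  struct foo_pcpu {
          local_lock_t    lock;
          unsigned int    count;
  };

  static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
          .lock = INIT_LOCAL_LOCK(lock),
  };

  static void foo_inc(void)
  {
          /* preempt_disable() + lockdep acquire on this CPU's lock */
          local_lock(&foo_pcpu.lock);
          this_cpu_inc(foo_pcpu.count);
          local_unlock(&foo_pcpu.lock);
  }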

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 include/linux/local_lock_internal.h |  5 ++++-
 include/linux/lockdep.h             | 15 ++++++++++++---
 include/linux/lockdep_types.h       | 18 ++++++++++++++----
 kernel/locking/lockdep.c            | 16 +++++++++-------
 4 files changed, 39 insertions(+), 15 deletions(-)

--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -18,6 +18,7 @@ typedef struct {
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
}
#else
# define LL_DEP_MAP_INIT(lockname)
@@ -30,7 +31,9 @@ do { \
static struct lock_class_key __key; \
\
debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
- lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
+ LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -185,12 +185,19 @@ extern void lockdep_unregister_key(struc
* to lockdep:
*/

-extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner, short outer);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner)
+ struct lock_class_key *key, int subclass, u8 inner)
{
lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}
@@ -340,6 +347,8 @@ static inline void lockdep_set_selftest_
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -30,6 +30,12 @@ enum lockdep_wait_type {
LD_WAIT_MAX, /* must be last */
};

+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_MAX,
+};
+
#ifdef CONFIG_LOCKDEP

/*
@@ -119,8 +125,10 @@ struct lock_class {
int name_version;
const char *name;

- short wait_type_inner;
- short wait_type_outer;
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */

#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
@@ -169,8 +177,10 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
- short wait_type_outer; /* can be taken in this context */
- short wait_type_inner; /* presents this context */
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1290,6 +1290,7 @@ register_lock_class(struct lockdep_map *
class->name_version = count_matching_names(class);
class->wait_type_inner = lock->wait_type_inner;
class->wait_type_outer = lock->wait_type_outer;
+ class->lock_type = lock->lock_type;
/*
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
@@ -4503,9 +4504,9 @@ print_lock_invalid_wait_context(struct t
*/
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
{
- short next_inner = hlock_class(next)->wait_type_inner;
- short next_outer = hlock_class(next)->wait_type_outer;
- short curr_inner;
+ u8 next_inner = hlock_class(next)->wait_type_inner;
+ u8 next_outer = hlock_class(next)->wait_type_outer;
+ u8 curr_inner;
int depth;

if (!next_inner || next->trylock)
@@ -4532,7 +4533,7 @@ static int check_wait_context(struct tas

for (; depth < curr->lockdep_depth; depth++) {
struct held_lock *prev = curr->held_locks + depth;
- short prev_inner = hlock_class(prev)->wait_type_inner;
+ u8 prev_inner = hlock_class(prev)->wait_type_inner;

if (prev_inner) {
/*
@@ -4581,9 +4582,9 @@ static inline int check_wait_context(str
/*
* Initialize a lock instance's lock-class mapping info:
*/
-void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass,
- short inner, short outer)
+ u8 inner, u8 outer, u8 lock_type)
{
int i;

@@ -4606,6 +4607,7 @@ void lockdep_init_map_waits(struct lockd

lock->wait_type_outer = outer;
lock->wait_type_inner = inner;
+ lock->lock_type = lock_type;

/*
* No key, no joy, we need to hash something.
@@ -4640,7 +4642,7 @@ void lockdep_init_map_waits(struct lockd
raw_local_irq_restore(flags);
}
}
-EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
+EXPORT_SYMBOL_GPL(lockdep_init_map_type);

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);