[PATCH 25/27] locking/lockdep: Add support for dynamic keys

From: Bart Van Assche
Date: Wed Nov 28 2018 - 18:44:34 EST


A shortcoming of the current lockdep implementation is that it requires
lock keys to be allocated statically. That forces certain lock objects
to share lock keys. Since lock dependency analysis groups lock objects
per key, sharing lock keys can cause false positive lockdep reports.
Make it possible to avoid such false positives by allowing lock keys to
be allocated dynamically. Require that dynamically allocated lock keys
are registered before use by calling lockdep_register_key(). Complain
about attempts to register the same lock key pointer twice without an
intervening lockdep_unregister_key() call.
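
For illustration only (not part of the patch): a minimal sketch of how a
driver with per-instance locks could use the new API. The struct foo, its
members and foo_init_instance() are made-up names; lockdep_set_class() is
the existing lockdep helper for associating a lock with a key.

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical per-instance object that carries its own lock class key. */
struct foo {
	struct lock_class_key lock_key;	/* lives and dies with the object */
	spinlock_t lock;
};

static void foo_init_instance(struct foo *f)
{
	/* A dynamically allocated key must be registered before first use. */
	lockdep_register_key(&f->lock_key);
	spin_lock_init(&f->lock);
	/* Give this instance's lock its own class instead of a shared key. */
	lockdep_set_class(&f->lock, &f->lock_key);
}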

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
include/linux/lockdep.h | 13 ++++-
kernel/locking/lockdep.c | 121 ++++++++++++++++++++++++++++++++++++---
2 files changed, 123 insertions(+), 11 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 01e55fca7c2c..bd6bfad66382 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -46,15 +46,19 @@ extern int lock_stat;
#define NR_LOCKDEP_CACHING_CLASSES 2

/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
*/
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));

+/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
+ struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};

@@ -280,6 +284,9 @@ extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_off(void);
extern void lockdep_on(void);

+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
+
/*
* These methods are used by specific locking variants (spinlocks,
* rwlocks, mutexes and rwsems) to pass init/acquire/release events
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0e273731d028..213153b10951 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -142,6 +142,9 @@ static LIST_HEAD(free_list_entries);
* free_lock_classes points at the first free element. These elements are
* linked together by the lock_entry member in struct lock_class.
*/
+#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
+#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
+static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static LIST_HEAD(free_lock_classes);
@@ -602,7 +605,7 @@ static int very_verbose(struct lock_class *class)
* Is this the address of a static object:
*/
#ifdef __KERNEL__
-static int static_obj(void *obj)
+static int static_obj(const void *obj)
{
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
@@ -881,6 +884,70 @@ static void init_lists(void)
list_add_tail(&list_entries[i].alloc_entry, &free_list_entries);
}

+static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
+{
+ unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
+
+ return lock_keys_hash + hash;
+}
+
+/*
+ * Register a dynamically allocated key.
+ */
+void lockdep_register_key(struct lock_class_key *key)
+{
+ struct hlist_head *hash_head;
+ struct lock_class_key *k;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return;
+ hash_head = keyhashentry(key);
+ raw_local_irq_save(flags);
+ if (!graph_lock())
+ goto restore_irqs;
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (WARN_ON_ONCE(k == key))
+ goto out_unlock;
+ }
+ hlist_add_head_rcu(&key->hash_entry, hash_head);
+out_unlock:
+ graph_unlock();
+restore_irqs:
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_register_key);
+
+/*
+ * Check whether a key has been registered as a dynamic key. Must not be called
+ * from interrupt context.
+ */
+static bool is_dynamic_key(const struct lock_class_key *key)
+{
+ struct hlist_head *hash_head;
+ struct lock_class_key *k;
+ unsigned long flags;
+ bool found = false;
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return false;
+ hash_head = keyhashentry(key);
+ raw_local_irq_save(flags);
+ if (!graph_lock())
+ goto restore_irqs;
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (k == key) {
+ found = true;
+ break;
+ }
+ }
+ graph_unlock();
+restore_irqs:
+ raw_local_irq_restore(flags);
+
+ return found;
+}
+
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
@@ -905,7 +972,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (!lock->key) {
if (!assign_lock_key(lock))
return NULL;
- } else if (!static_obj(lock->key)) {
+ } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
return NULL;
}

@@ -3284,13 +3351,13 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (DEBUG_LOCKS_WARN_ON(!key))
return;
/*
- * Sanity check, the lock-class key must be persistent:
+ * Sanity check, the lock-class key must either have been allocated
+ * statically or must have been registered as a dynamic key.
*/
- if (!static_obj(key)) {
- printk("BUG: key %px not in .data!\n", key);
- /*
- * What it says above ^^^^^, I suggest you read it.
- */
+ if (!static_obj(key) && !is_dynamic_key(key)) {
+ if (debug_locks)
+ printk(KERN_INFO "BUG: key %px has not been registered!\n",
+ key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
@@ -4510,6 +4577,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
free_zapped_classes(&zapped_classes);
}

+
+/*
+ * Unregister a dynamically allocated key. Must not be called from interrupt
+ * context. The caller must ensure that freeing @key only happens after an RCU
+ * grace period.
+ */
+void lockdep_unregister_key(struct lock_class_key *key)
+{
+ struct list_head zapped_classes;
+ struct hlist_head *hash_head = keyhashentry(key);
+ struct lock_class_key *k;
+ unsigned long flags;
+ bool found = false;
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return;
+
+ __lockdep_free_key_range(&zapped_classes, key, 1);
+
+ raw_local_irq_save(flags);
+ if (!graph_lock())
+ goto restore_irqs;
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (k == key) {
+ hlist_del_rcu(&k->hash_entry);
+ found = true;
+ break;
+ }
+ }
+ WARN_ON_ONCE(!found);
+ graph_unlock();
+restore_irqs:
+ raw_local_irq_restore(flags);
+
+ free_zapped_classes(&zapped_classes);
+}
+EXPORT_SYMBOL_GPL(lockdep_unregister_key);
+
/*
* Check whether any element of the @lock->class_cache[] array refers to a
* registered lock class. The caller must hold either the graph lock or the
--
2.20.0.rc0.387.gc7a69e6b6c-goog
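
For completeness, a sketch of the matching teardown (again not part of the
patch; foo and foo_destroy_instance() are the made-up names from the sketch
above). Since the comment on lockdep_unregister_key() requires that the key
memory is only freed after an RCU grace period, the caller can wait with
synchronize_rcu() before releasing the object that embeds the key.

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void foo_destroy_instance(struct foo *f)
{
	/* Remove the key from lock_keys_hash and zap the classes using it. */
	lockdep_unregister_key(&f->lock_key);
	/*
	 * Key lookups walk the hash list under RCU, so wait for a grace
	 * period before the memory backing the key is freed.
	 */
	synchronize_rcu();
	kfree(f);
}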