[PATCH 2/7] lockdep: isolate lock graph walking

From: Peter Zijlstra
Date: Wed May 23 2007 - 06:03:48 EST


Put the lock dependency tracking bits under CONFIG_PROVE_LOCKING.
This way we can reuse the held_lock part of lockdep for other purposes.
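
For illustration only (not part of the patch): the pattern being applied is
sketched below as a standalone, compilable C snippet. The function names
(check_dependencies, track_lock) and the PROVE_LOCKING define are made up
for this sketch; the real patch simply wraps the existing lockdep graph
walking code in #ifdef CONFIG_PROVE_LOCKING while the held-lock bookkeeping
stays built unconditionally.

/* Standalone sketch (not kernel code) of the conditional compilation
 * pattern: the dependency-graph walking is compiled only when the
 * prover is enabled, the held-lock tracking is always available. */
#include <stdio.h>

struct held_lock { int id; };

#ifdef PROVE_LOCKING
/* Only the prover needs the dependency-graph walk. */
static int check_dependencies(struct held_lock *hlock)
{
	printf("walking lock graph for lock %d\n", hlock->id);
	return 1;
}
#endif

/* Held-lock tracking stays unconditional so other users
 * (e.g. lock statistics) can reuse it without the prover. */
static int track_lock(struct held_lock *hlock)
{
#ifdef PROVE_LOCKING
	if (!check_dependencies(hlock))
		return 0;
#endif
	printf("recorded held lock %d\n", hlock->id);
	return 1;
}

int main(void)
{
	struct held_lock l = { .id = 1 };
	return track_lock(&l) ? 0 : 1;
}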

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
kernel/lockdep.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)

Index: linux-2.6/kernel/lockdep.c
===================================================================
--- linux-2.6.orig/kernel/lockdep.c 2007-05-23 00:22:59.000000000 +0200
+++ linux-2.6/kernel/lockdep.c 2007-05-23 00:42:43.000000000 +0200
@@ -95,6 +95,7 @@ static int lockdep_initialized;
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

+#ifdef CONFIG_PROVE_LOCKING
/*
* Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entr
}
return list_entries + nr_list_entries++;
}
+#endif

/*
* All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
static struct list_head classhash_table[CLASSHASH_SIZE];

unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif

/*
* We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(stru
}
}

+#ifdef CONFIG_PROVE_LOCKING
/*
* Add a new dependency to the head of the list:
*/
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_lis

return 0;
}
+#endif

static void print_kernel_version(void)
{
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
init_utsname()->version);
}

+#ifdef CONFIG_PROVE_LOCKING
/*
* When a circular dependency is detected, print the
* header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *sou
}
return 1;
}
+#endif

static int very_verbose(struct lock_class *class)
{
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, st

#endif

+#ifdef CONFIG_PROVE_LOCKING
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
@@ -1087,7 +1096,7 @@ out_bug:

return 0;
}
-
+#endif

/*
* Is this the address of a static object:
@@ -1307,6 +1316,7 @@ out_unlock_set:
return class;
}

+#ifdef CONFIG_PROVE_LOCKING
/*
* Look up a dependency chain. If the key is not present yet then
* add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ cache_hit:

return 1;
}
+#endif

/*
* We are building curr_chain_key incrementally, so double-check
@@ -2168,6 +2179,7 @@ out_calc_hash:
chain_key = iterate_chain_key(chain_key, id);
curr->curr_chain_key = chain_key;

+#ifdef CONFIG_PROVE_LOCKING
/*
* Trylock needs to maintain the stack of held locks, but it
* does not add new dependencies, because trylock can be done
@@ -2214,6 +2226,7 @@ out_calc_hash:
/* after lookup_chain_cache(): */
if (unlikely(!debug_locks))
return 0;
+#endif

curr->lockdep_depth++;
check_chain_key(curr);

--
