[RFC PATCH v2 14/14] dcache: Implement object migration

From: Tobin C. Harding
Date: Wed Apr 03 2019 - 00:31:18 EST


The dentry slab cache is susceptible to internal fragmentation. Now
that we have Slab Movable Objects we can defragment the dcache. Object
migration is only possible for dentry objects that are not currently
referenced by anyone, i.e. we are using the object migration
infrastructure to free unused dentries.

Implement isolate and migrate functions for the dentry slab cache.

Signed-off-by: Tobin C. Harding <tobin@xxxxxxxxxx>
---
fs/dcache.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 87 insertions(+)

diff --git a/fs/dcache.c b/fs/dcache.c
index 606844ad5171..4387715b7ebb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -30,6 +30,7 @@
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
+#include <linux/backing-dev.h>
#include "internal.h"
#include "mount.h"

@@ -3074,6 +3075,90 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
}
EXPORT_SYMBOL(d_tmpfile);

+/**
+ * d_isolate() - Dentry isolation callback function.
+ * @s: The dentry cache.
+ * @v: Vector of pointers to the objects to migrate.
+ * @nr: Number of objects in @v.
+ *
+ * The slab allocator is holding off frees. We can safely examine
+ * the object without the danger of it vanishing from under us.
+ */
+static void *d_isolate(struct kmem_cache *s, void **v, int nr)
+{
+ struct dentry *dentry;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ dentry = v[i];
+ spin_lock(&dentry->d_lock);
+ /*
+ * Three sorts of dentries cannot be reclaimed:
+ *
+ * 1. dentries that are in the process of being allocated
+ * or being freed. In that case the dentry is neither
+ * on the LRU nor hashed.
+ *
+ * 2. Fake hashed entries as used for anonymous dentries
+ * and pipe I/O. The fake hashed entries have d_flags
+ * set to indicate a hashed entry. However, the
+ * d_hash field indicates that the entry is not hashed.
+ *
+ * 3. dentries that have a backing store that is not
+ * writable. This is true for tmpfs and other in
+ * memory filesystems. Removing dentries from them
+ * would lose dentries for good.
+ */
+ if ((d_unhashed(dentry) && list_empty(&dentry->d_lru)) ||
+ (!d_unhashed(dentry) && hlist_bl_unhashed(&dentry->d_hash)) ||
+ (dentry->d_inode &&
+ !mapping_cap_writeback_dirty(dentry->d_inode->i_mapping))) {
+ /* Ignore this dentry */
+ v[i] = NULL;
+ } else {
+ __dget_dlock(dentry);
+ }
+ spin_unlock(&dentry->d_lock);
+ }
+ return NULL; /* No need for private data */
+}
+
+/**
+ * d_migrate() - Dentry migration callback function.
+ * @s: The dentry cache.
+ * @v: Vector of pointers to the objects to migrate.
+ * @nr: Number of objects in @v.
+ * @node: The NUMA node where new object should be allocated.
+ * @_unused: Returned by d_isolate() (currently %NULL).
+ *
+ * Slab has dropped all the locks. Get rid of the refcount obtained
+ * earlier and also free the object.
+ */
+static void d_migrate(struct kmem_cache *s, void **v, int nr,
+ int node, void *_unused)
+{
+ struct dentry *dentry;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ dentry = v[i];
+ if (dentry)
+ d_invalidate(dentry);
+ }
+
+ for (i = 0; i < nr; i++) {
+ dentry = v[i];
+ if (dentry)
+ dput(dentry);
+ }
+
+ /*
+ * dentries are freed using RCU so we need to wait until RCU
+ * operations are complete.
+ */
+ synchronize_rcu();
+}
+
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
@@ -3119,6 +3204,8 @@ static void __init dcache_init(void)
sizeof_field(struct dentry, d_iname),
dcache_ctor);

+ kmem_cache_setup_mobility(dentry_cache, d_isolate, d_migrate);
+
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
return;
--
2.21.0