[PATCH 1/3] mm/slab: use list_first_entry_or_null()

From: Geliang Tang
Date: Wed Dec 02 2015 - 10:48:13 EST


Simplify fallback_alloc() and ____cache_alloc_node() by replacing the
open-coded "list head == next entry" emptiness checks with
list_first_entry_or_null().

Signed-off-by: Geliang Tang <geliangtang@xxxxxxx>
---
mm/slab.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 4765c97..6bb0466 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2791,18 +2791,18 @@ retry:
}

while (batchcount > 0) {
- struct list_head *entry;
struct page *page;
/* Get slab alloc is to come from. */
- entry = n->slabs_partial.next;
- if (entry == &n->slabs_partial) {
+ page = list_first_entry_or_null(&n->slabs_partial,
+ struct page, lru);
+ if (!page) {
n->free_touched = 1;
- entry = n->slabs_free.next;
- if (entry == &n->slabs_free)
+ page = list_first_entry_or_null(&n->slabs_free,
+ struct page, lru);
+ if (!page)
goto must_grow;
}

- page = list_entry(entry, struct page, lru);
check_spinlock_acquired(cachep);

/*
@@ -3085,7 +3085,6 @@ retry:
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
- struct list_head *entry;
struct page *page;
struct kmem_cache_node *n;
void *obj;
@@ -3098,15 +3097,16 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
retry:
check_irq_off();
spin_lock(&n->list_lock);
- entry = n->slabs_partial.next;
- if (entry == &n->slabs_partial) {
+ page = list_first_entry_or_null(&n->slabs_partial,
+ struct page, lru);
+ if (!page) {
n->free_touched = 1;
- entry = n->slabs_free.next;
- if (entry == &n->slabs_free)
+ page = list_first_entry_or_null(&n->slabs_free,
+ struct page, lru);
+ if (!page)
goto must_grow;
}

- page = list_entry(entry, struct page, lru);
check_spinlock_acquired_node(cachep, nodeid);

STATS_INC_NODEALLOCS(cachep);
--
2.5.0


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/