[PATCH] mm: fix inconsistent use of spaces and tabs

From: 13145886936
Date: Thu Jun 03 2021 - 04:26:46 EST


From: gushengxian <gushengxian@xxxxxxxxxx>

Clean up whitespace in a few mm/ files: use tabs rather than spaces for
continuation-line indentation, and drop stray spaces after a unary minus
and after an opening parenthesis, as preferred by the kernel coding style.

Signed-off-by: gushengxian <gushengxian@xxxxxxxxxx>
---
mm/memory.c | 2 +-
mm/page_alloc.c | 2 +-
mm/vmscan.c | 2 +-
mm/vmstat.c | 8 ++++----
4 files changed, 7 insertions(+), 7 deletions(-)
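
These are the kind of whitespace issues that scripts/checkpatch.pl reports;
a quick way to reproduce the warnings locally (assuming a full kernel tree
checkout; the file chosen below is only illustrative) is:

    # run checkpatch in file mode on one of the touched files
    ./scripts/checkpatch.pl --strict -f mm/vmstat.c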

diff --git a/mm/memory.c b/mm/memory.c
index f85c2c322a23..aec804080ad9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
- !(vma->vm_flags & VM_PFNMAP))
+ !(vma->vm_flags & VM_PFNMAP))
return;

zap_page_range_single(vma, address, size, NULL);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4c468aa596aa..de1ab8073cc6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8908,7 +8908,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
* need to be freed with free_contig_range().
*/
int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask)
+ unsigned migratetype, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
unsigned int order;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 60a19fd6ea3f..939bf138478a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4303,7 +4303,7 @@ static int __init kswapd_init(void)

swap_setup();
for_each_node_state(nid, N_MEMORY)
- kswapd_run(nid);
+ kswapd_run(nid);
return 0;
}

diff --git a/mm/vmstat.c b/mm/vmstat.c
index b0534e068166..23504a605c7c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -200,7 +200,7 @@ int calculate_normal_threshold(struct zone *zone)
* The threshold scales with the number of processors and the amount
* of memory per zone. More memory means that we can defer updates for
* longer, more processors could lead to more contention.
- * fls() is used to have a cheap way of logarithmic scaling.
+ * fls() is used to have a cheap way of logarithmic scaling.
*
* Some sample thresholds:
*
@@ -439,7 +439,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)

v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v < - t)) {
+ if (unlikely(v < -t)) {
s8 overstep = t >> 1;

zone_page_state_add(v - overstep, zone, item);
@@ -457,7 +457,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)

v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v < - t)) {
+ if (unlikely(v < -t)) {
s8 overstep = t >> 1;

node_page_state_add(v - overstep, pgdat, item);
@@ -1063,7 +1063,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
* 0 => allocation would fail due to lack of memory
* 1 => allocation would fail due to fragmentation
*/
- return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
+ return 1000 - div_u64((1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}

/*
--
2.25.1