Re: [PATCH v2 1/6] x86: remove unneeded memblock_find_dma_reserve()

From: Mike Rapoport
Date: Tue Mar 26 2024 - 02:45:00 EST


On Mon, Mar 25, 2024 at 10:56:41PM +0800, Baoquan He wrote:
> The variable dma_reserve and its usage were introduced in commit 0e0b864e069c
> ("[PATCH] Account for memmap and optionally the kernel image as holes").
> Its original purpose was to account for the reserved pages in the DMA
> zone so that the DMA zone's watermark calculation is more accurate on x86.
>
> However, we now have zone->managed_pages to account for all pages
> available to the buddy allocator, and zone->present_pages to account
> for all present physical pages in a zone. More importantly, on x86 the
> zone->managed_pages value calculated during free_area_init() is only
> provisional: every zone's managed_pages is zeroed out and reset to the
> actual value according to how many pages are released to the buddy
> allocator in mem_init(). No buddy allocation is requested before
> mem_init(), and the zones' pcp and watermark setup is done only after
> mem_init(). So there is no need to worry about the accuracy of the DMA
> zone's accounting during free_area_init().
>
> Hence, remove memblock_find_dma_reserve() to stop calculating and
> setting dma_reserve.
>
> Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>

Reviewed-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
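
For anyone who wants the ordering spelled out, here is a toy userspace
model of the flow the changelog relies on: free_area_init() computes a
provisional managed_pages, mem_init() zeroes it and recounts it as pages
are released to the buddy allocator, and watermark setup only runs after
that. The toy_* names below are made up for illustration; this is not
the actual kernel code.

/*
 * Toy userspace model of the init ordering -- not kernel code.
 */
#include <stdio.h>

static unsigned long managed_pages;	/* stands in for ZONE_DMA's zone->managed_pages */

/* free_area_init(): provisional accounting, with or without dma_reserve */
static void toy_free_area_init(unsigned long present, unsigned long dma_reserve)
{
	managed_pages = present - dma_reserve;
}

/* mem_init()/memblock_free_all(): zero managed_pages, then recount it */
static void toy_mem_init(unsigned long pages_released_to_buddy)
{
	managed_pages = 0;				/* reset_all_zones_managed_pages() */
	managed_pages += pages_released_to_buddy;	/* counted as pages reach the buddy */
}

int main(void)
{
	toy_free_area_init(4096, 128);	/* early value, discarded before it is ever used */
	toy_mem_init(3900);		/* the value watermark setup actually sees */

	/* watermarks are derived only after mem_init(), from managed_pages */
	printf("managed_pages used for watermarks: %lu\n", managed_pages);
	return 0;
}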

> ---
> arch/x86/include/asm/pgtable.h | 1 -
> arch/x86/kernel/setup.c | 2 --
> arch/x86/mm/init.c | 47 ----------------------------------
> 3 files changed, 50 deletions(-)
>
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 315535ffb258..cefc7a84f7a4 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1200,7 +1200,6 @@ static inline int pgd_none(pgd_t pgd)
> extern int direct_gbpages;
> void init_mem_mapping(void);
> void early_alloc_pgt_buf(void);
> -extern void memblock_find_dma_reserve(void);
> void __init poking_init(void);
> unsigned long init_memory_mapping(unsigned long start,
> unsigned long end, pgprot_t prot);
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index ef206500ed6f..74873bd04ad1 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1106,8 +1106,6 @@ void __init setup_arch(char **cmdline_p)
> */
> arch_reserve_crashkernel();
>
> - memblock_find_dma_reserve();
> -
> if (!early_xdbc_setup_hardware())
> early_xdbc_register_console();
>
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index 679893ea5e68..615f0bf4bda6 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -990,53 +990,6 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
> }
> #endif
>
> -/*
> - * Calculate the precise size of the DMA zone (first 16 MB of RAM),
> - * and pass it to the MM layer - to help it set zone watermarks more
> - * accurately.
> - *
> - * Done on 64-bit systems only for the time being, although 32-bit systems
> - * might benefit from this as well.
> - */
> -void __init memblock_find_dma_reserve(void)
> -{
> -#ifdef CONFIG_X86_64
> - u64 nr_pages = 0, nr_free_pages = 0;
> - unsigned long start_pfn, end_pfn;
> - phys_addr_t start_addr, end_addr;
> - int i;
> - u64 u;
> -
> - /*
> - * Iterate over all memory ranges (free and reserved ones alike),
> - * to calculate the total number of pages in the first 16 MB of RAM:
> - */
> - nr_pages = 0;
> - for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
> - start_pfn = min(start_pfn, MAX_DMA_PFN);
> - end_pfn = min(end_pfn, MAX_DMA_PFN);
> -
> - nr_pages += end_pfn - start_pfn;
> - }
> -
> - /*
> - * Iterate over free memory ranges to calculate the number of free
> - * pages in the DMA zone, while not counting potential partial
> - * pages at the beginning or the end of the range:
> - */
> - nr_free_pages = 0;
> - for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
> - start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
> - end_pfn = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);
> -
> - if (start_pfn < end_pfn)
> - nr_free_pages += end_pfn - start_pfn;
> - }
> -
> - set_dma_reserve(nr_pages - nr_free_pages);
> -#endif
> -}
> -
> void __init zone_sizes_init(void)
> {
> unsigned long max_zone_pfns[MAX_NR_ZONES];
> --
> 2.41.0
>

--
Sincerely yours,
Mike.