[PATCH] powerpc/8xx: Fix permanently mapped IMMR region.

From: Christophe Leroy
Date: Fri Aug 23 2019 - 05:56:31 EST


When not using large TLBs (__map_without_ltlbs), the IMMR region is
still permanently mapped as a contiguous block in the FIXMAP area.
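
In that case the mapping is built page by page through the fixmap,
roughly as mmu_mapin_immr() already does (a simplified sketch of the
existing 8xx code, not part of this patch; the PAGE_KERNEL_NCG
protection is assumed):

	static void mmu_mapin_immr(void)
	{
		unsigned long p = PHYS_IMMR_BASE;
		unsigned long v = VIRT_IMMR_BASE;
		int offset;

		/* One small-page mapping per IMMR page, placed in the
		 * FIXMAP area; non-cached guarded protection assumed.
		 */
		for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
			map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
	}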

The loop removed from paging_init() re-maps the whole FIXMAP range
with __pgprot(0), wiping mappings that are already present there,
including the IMMR one. Do not remove pages mapped in the FIXMAP
region when initialising paging.

Properly report that the IMMR region is block-mapped even when not
using large TLBs, by checking the IMMR range in v_block_mapped() and
p_block_mapped() before bailing out on __map_without_ltlbs.
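
This matters because callers that consult these helpers, such as
__ioremap_caller(), reuse an existing block mapping instead of
creating a new one. A hedged sketch of that caller side
(reuse_block_mapping() is a hypothetical helper, not the actual
ioremap code):

	/* Hypothetical illustration, not part of this patch. */
	static void __iomem *reuse_block_mapping(phys_addr_t pa)
	{
		unsigned long va = p_block_mapped(pa);

		/* With this change, an IMMR address yields its fixmap VA
		 * here even when __map_without_ltlbs is set, so no second
		 * mapping of the IMMR registers is created.
		 */
		return va ? (void __iomem *)va : NULL;
	}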

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
---
arch/powerpc/mm/mem.c | 8 --------
arch/powerpc/mm/nohash/8xx.c | 13 +++++++------
2 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f99128a8d6..8e221d8744ba 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,14 +216,6 @@ void __init paging_init(void)
 	unsigned long long total_ram = memblock_phys_mem_size();
 	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 
-#ifdef CONFIG_PPC32
-	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
-	unsigned long end = __fix_to_virt(FIX_HOLE);
-
-	for (; v < end; v += PAGE_SIZE)
-		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
-#endif
-
 #ifdef CONFIG_HIGHMEM
 	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 4a06cb342da2..e6918235fe04 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -21,33 +21,34 @@ extern int __map_without_ltlbs;
 static unsigned long block_mapped_ram;
 
 /*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
  * Otherwise, returns 0
  */
 phys_addr_t v_block_mapped(unsigned long va)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
 		return p + va - VIRT_IMMR_BASE;
+	if (__map_without_ltlbs)
+		return 0;
 	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
 		return __pa(va);
 	return 0;
 }
 
 /*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
  */
 unsigned long p_block_mapped(phys_addr_t pa)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (pa >= p && pa < p + IMMR_SIZE)
 		return VIRT_IMMR_BASE + pa - p;
+	if (__map_without_ltlbs)
+		return 0;
 	if (pa < block_mapped_ram)
 		return (unsigned long)__va(pa);
 	return 0;
--
2.13.3