[PATCH] vmcore: call remap_pfn_range() separately for respective partial pages

From: HATAYAMA Daisuke
Date: Thu Nov 28 2013 - 00:51:22 EST


According to the report by Vivek in
https://lkml.org/lkml/2013/11/13/439, on some specific systems, some
of the System RAM ranges don't end at a page boundary, and the later part
of the same page is used for some kind of ACPI data. As a result,
remap_pfn_range() on the partial page fails if the mapping range covers
the boundary between the System RAM part and the ACPI data part within
that page, due to the detection of different cache types in
track_pfn_remap().

To resolve the issue, call remap_pfn_range() separately for respective
partial pages, not for multiple consecutive pages that don't either
start or end at a page boundary, by creating vmcore objects for
respective partial pages.

This patch never changes shape of /proc/vmcore visible from user-land.

Reported-by: Vivek Goyal <vgoyal@xxxxxxxxxx>
Signed-off-by: HATAYAMA Daisuke <d.hatayama@xxxxxxxxxxxxxx>
---
fs/proc/vmcore.c | 108 ++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 84 insertions(+), 24 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 9100d69..e396a1d 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -816,26 +816,56 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
vmcore_off = elfsz + elfnotes_sz;

for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
- u64 paddr, start, end, size;
+ u64 start, end, size, rest;
+ u64 start_up, start_down, end_up, end_down;

if (phdr_ptr->p_type != PT_LOAD)
continue;

- paddr = phdr_ptr->p_offset;
- start = rounddown(paddr, PAGE_SIZE);
- end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
- size = end - start;
+ start = phdr_ptr->p_offset;
+ start_up = roundup(start, PAGE_SIZE);
+ start_down = rounddown(start, PAGE_SIZE);
+ end = phdr_ptr->p_offset + phdr_ptr->p_memsz;
+ end_up = roundup(end, PAGE_SIZE);
+ end_down = rounddown(end, PAGE_SIZE);
+ size = end_up - start_down;
+ rest = phdr_ptr->p_memsz;
+
+ if (!PAGE_ALIGNED(start)) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = start_down;
+ new->size = PAGE_SIZE;
+ list_add_tail(&new->list, vc_list);
+ rest -= min(start_up, end) - start;
+ }

/* Add this contiguous chunk of memory to vmcore list.*/
- new = get_new_element();
- if (!new)
- return -ENOMEM;
- new->paddr = start;
- new->size = size;
- list_add_tail(&new->list, vc_list);
+ if (rest > 0 && start_up < end_down) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = start_up;
+ new->size = end_down - start_up;
+ list_add_tail(&new->list, vc_list);
+ rest -= end_down - start_up;
+ }
+
+ if (rest > 0) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = end_down;
+ new->size = PAGE_SIZE;
+ list_add_tail(&new->list, vc_list);
+ rest -= end - end_down;
+ }
+
+ WARN_ON(rest > 0);

/* Update the program header offset. */
- phdr_ptr->p_offset = vmcore_off + (paddr - start);
+ phdr_ptr->p_offset = vmcore_off + (start - start_down);
vmcore_off = vmcore_off + size;
}
return 0;
@@ -859,26 +889,56 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
vmcore_off = elfsz + elfnotes_sz;

for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
- u64 paddr, start, end, size;
+ u64 start, end, size, rest;
+ u64 start_up, start_down, end_up, end_down;

if (phdr_ptr->p_type != PT_LOAD)
continue;

- paddr = phdr_ptr->p_offset;
- start = rounddown(paddr, PAGE_SIZE);
- end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
- size = end - start;
+ start = phdr_ptr->p_offset;
+ start_up = roundup(start, PAGE_SIZE);
+ start_down = rounddown(start, PAGE_SIZE);
+ end = phdr_ptr->p_offset + phdr_ptr->p_memsz;
+ end_up = roundup(end, PAGE_SIZE);
+ end_down = rounddown(end, PAGE_SIZE);
+ rest = phdr_ptr->p_memsz;
+ size = end_up - start_down;
+
+ if (!PAGE_ALIGNED(start)) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = start_down;
+ new->size = PAGE_SIZE;
+ list_add_tail(&new->list, vc_list);
+ rest -= min(start_up, end) - start;
+ }

/* Add this contiguous chunk of memory to vmcore list.*/
- new = get_new_element();
- if (!new)
- return -ENOMEM;
- new->paddr = start;
- new->size = size;
- list_add_tail(&new->list, vc_list);
+ if (rest > 0 && start_up < end_down) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = start_up;
+ new->size = end_down - start_up;
+ list_add_tail(&new->list, vc_list);
+ rest -= end_down - start_up;
+ }
+
+ if (rest > 0) {
+ new = get_new_element();
+ if (!new)
+ return -ENOMEM;
+ new->paddr = end_down;
+ new->size = PAGE_SIZE;
+ list_add_tail(&new->list, vc_list);
+ rest -= end - end_down;
+ }
+
+ WARN_ON(rest > 0);

/* Update the program header offset */
- phdr_ptr->p_offset = vmcore_off + (paddr - start);
+ phdr_ptr->p_offset = vmcore_off + (start - start_down);
vmcore_off = vmcore_off + size;
}
return 0;
--
1.8.3.1

--
Thanks.
HATAYAMA, Daisuke

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/