[patch 46/60] x86/ldt: Rename ldt_struct->entries member

From: Thomas Gleixner
Date: Mon Dec 04 2017 - 11:52:19 EST


From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

To support user-shared LDT entry mappings, the LDT related code must be
changed so that the kernel side references only the real page mapping of
the LDT. When the LDT is loaded, the entries are alias-mapped into the
per-CPU fixmap. To catch all users, rename ldt_struct->entries to
ldt_struct->entries_va and fix up all references.
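
As a rough sketch of the distinction the new name is meant to enforce
(illustrative only, not part of this patch; ldt_slot_va() is a made-up
stand-in for the per-CPU fixmap alias that later patches in this series
establish):

	struct ldt_struct {
		struct desc_struct *entries_va;	/* kernel-side (real) mapping */
		unsigned int nr_entries;
	};

	static void example_load_ldt(struct ldt_struct *ldt)
	{
		/* The kernel touches descriptors via the real mapping ... */
		struct desc_struct *desc = &ldt->entries_va[0];

		/*
		 * ... while the CPU, once the full series is applied, would
		 * see the alias in the per-CPU fixmap instead:
		 *
		 *	set_ldt(ldt_slot_va(), ldt->nr_entries);
		 *
		 * Until then, set_ldt() still gets the kernel VA:
		 */
		set_ldt(ldt->entries_va, ldt->nr_entries);
		(void)desc;
	}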

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 arch/x86/events/core.c             |    2 +-
 arch/x86/include/asm/mmu_context.h |    4 ++--
 arch/x86/kernel/ldt.c              |   28 +++++++++++++++-------------
 arch/x86/kernel/process_64.c       |    2 +-
 arch/x86/kernel/step.c             |    2 +-
 arch/x86/lib/insn-eval.c           |    2 +-
 arch/x86/math-emu/fpu_system.h     |    2 +-
 7 files changed, 22 insertions(+), 20 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2375,7 +2375,7 @@ static unsigned long get_segment_base(un
 		if (!ldt || idx >= ldt->nr_entries)
 			return 0;

-		desc = &ldt->entries[idx];
+		desc = &ldt->entries_va[idx];
 #else
 		return 0;
 #endif
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -50,7 +50,7 @@ struct ldt_struct {
 	 * call gates. On native, we could merge the ldt_struct and LDT
 	 * allocations, but it's not worth trying to optimize.
 	 */
-	struct desc_struct *entries;
+	struct desc_struct *entries_va;
 	unsigned int nr_entries;
 };

@@ -91,7 +91,7 @@ static inline void load_mm_ldt(struct mm
 	 */

 	if (unlikely(ldt))
-		set_ldt(ldt->entries, ldt->nr_entries);
+		set_ldt(ldt->entries_va, ldt->nr_entries);
 	else
 		clear_LDT();
 #else
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -52,7 +52,7 @@ static void flush_ldt(void *__mm)
 		return;

 	pc = &mm->context;
-	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+	set_ldt(pc->ldt->entries_va, pc->ldt->nr_entries);

 	refresh_ldt_segments();
 }
@@ -80,11 +80,11 @@ static struct ldt_struct *alloc_ldt_stru
 	 * than PAGE_SIZE.
 	 */
 	if (alloc_size > PAGE_SIZE)
-		new_ldt->entries = vzalloc(alloc_size);
+		new_ldt->entries_va = vzalloc(alloc_size);
 	else
-		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
+		new_ldt->entries_va = (void *)get_zeroed_page(GFP_KERNEL);

-	if (!new_ldt->entries) {
+	if (!new_ldt->entries_va) {
 		kfree(new_ldt);
 		return NULL;
 	}
@@ -96,7 +96,7 @@ static struct ldt_struct *alloc_ldt_stru
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
+	paravirt_alloc_ldt(ldt->entries_va, ldt->nr_entries);
 }

 /* context.lock is held */
@@ -115,11 +115,11 @@ static void free_ldt_struct(struct ldt_s
 	if (likely(!ldt))
 		return;

-	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
+	paravirt_free_ldt(ldt->entries_va, ldt->nr_entries);
 	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
-		vfree_atomic(ldt->entries);
+		vfree_atomic(ldt->entries_va);
 	else
-		free_page((unsigned long)ldt->entries);
+		free_page((unsigned long)ldt->entries_va);
 	kfree(ldt);
 }

@@ -152,7 +152,7 @@ int init_new_context_ldt(struct task_str
 		goto out_unlock;
 	}

-	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+	memcpy(new_ldt->entries_va, old_mm->context.ldt->entries_va,
 	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
 	finalize_ldt_struct(new_ldt);

@@ -194,7 +194,7 @@ static int read_ldt(void __user *ptr, un
 	if (entries_size > bytecount)
 		entries_size = bytecount;

-	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
+	if (copy_to_user(ptr, mm->context.ldt->entries_va, entries_size)) {
 		retval = -EFAULT;
 		goto out_unlock;
 	}
@@ -280,10 +280,12 @@ static int write_ldt(void __user *ptr, u
 	if (!new_ldt)
 		goto out_unlock;

-	if (old_ldt)
-		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
+	if (old_ldt) {
+		memcpy(new_ldt->entries_va, old_ldt->entries_va,
+		       old_nr_entries * LDT_ENTRY_SIZE);
+	}

-	new_ldt->entries[ldt_info.entry_number] = ldt;
+	new_ldt->entries_va[ldt_info.entry_number] = ldt;
 	finalize_ldt_struct(new_ldt);

 	install_ldt(mm, new_ldt);
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -139,7 +139,7 @@ void release_thread(struct task_struct *
 		if (dead_task->mm->context.ldt) {
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
-				dead_task->mm->context.ldt->entries,
+				dead_task->mm->context.ldt->entries_va,
 				dead_task->mm->context.ldt->nr_entries);
 			BUG();
 		}
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -38,7 +38,7 @@ unsigned long convert_ip_to_linear(struc
 			     seg >= child->mm->context.ldt->nr_entries))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
-			desc = &child->mm->context.ldt->entries[seg];
+			desc = &child->mm->context.ldt->entries_va[seg];
 			base = get_desc_base(desc);

 			/* 16-bit code segment? */
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -583,7 +583,7 @@ static struct desc_struct *get_desc(unsi
 		mutex_lock(&current->active_mm->context.lock);
 		ldt = current->active_mm->context.ldt;
 		if (ldt && sel < ldt->nr_entries)
-			desc = &ldt->entries[sel];
+			desc = &ldt->entries_va[sel];

 		mutex_unlock(&current->active_mm->context.lock);

--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -29,7 +29,7 @@ static inline struct desc_struct FPU_get
 	seg >>= 3;
 	mutex_lock(&current->mm->context.lock);
 	if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
-		ret = current->mm->context.ldt->entries[seg];
+		ret = current->mm->context.ldt->entries_va[seg];
 	mutex_unlock(&current->mm->context.lock);
 #endif
 	return ret;