Re: Linux 4.9.106

From: Greg KH
Date: Tue Jun 05 2018 - 05:40:29 EST


diff --git a/Makefile b/Makefile
index 7d06dba3fde8..48d87e3a36c1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 105
+SUBLEVEL = 106
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 34b3fa2889d1..9e32d40d71bd 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#

+OBJECT_FILES_NON_STANDARD := y
+
avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f8756375df5..2e14acc3da25 100644
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#

+OBJECT_FILES_NON_STANDARD := y
+
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
index 41089e7c400c..45b4fca6c4a8 100644
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#

+OBJECT_FILES_NON_STANDARD := y
+
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
new file mode 100644
index 000000000000..7dc777a6cb40
--- /dev/null
+++ b/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_DX 2
+#define ORC_REG_DI 3
+#define ORC_REG_BP 4
+#define ORC_REG_SP 5
+#define ORC_REG_R10 6
+#define ORC_REG_R13 7
+#define ORC_REG_BP_INDIRECT 8
+#define ORC_REG_SP_INDIRECT 9
+#define ORC_REG_MAX 15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL 0
+#define ORC_TYPE_REGS 1
+#define ORC_TYPE_REGS_IRET 2
+#define UNWIND_HINT_TYPE_SAVE 3
+#define UNWIND_HINT_TYPE_RESTORE 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 bp_offset;
+ unsigned sp_reg:4;
+ unsigned bp_reg:4;
+ unsigned type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
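
As an aside, here is a minimal user-space sketch of how an unwinder might
consume one of these records to take a single unwind step. The names are
hypothetical and this is not the kernel's actual ORC unwinder; it only
illustrates the sp_reg/sp_offset scheme described in the comments above:

#include <stdbool.h>
#include <stdint.h>

struct my_orc_entry {			/* mirrors struct orc_entry above */
	int16_t  sp_offset;
	int16_t  bp_offset;
	unsigned sp_reg:4;
	unsigned bp_reg:4;
	unsigned type:2;
};

/* One unwind step: compute the previous (caller's) SP, i.e. the CFA. */
bool my_orc_prev_sp(const struct my_orc_entry *orc,
		    uint64_t sp, uint64_t bp, uint64_t *prev_sp)
{
	uint64_t base;

	switch (orc->sp_reg) {
	case 5:	/* ORC_REG_SP */
		base = sp;
		break;
	case 4:	/* ORC_REG_BP */
		base = bp;
		break;
	case 0:	/* ORC_REG_UNDEFINED: nothing to unwind to */
	default:
		return false;
	}

	/* For ORC_TYPE_CALL, sp_reg + sp_offset resolves to PREV_SP. */
	*prev_sp = base + orc->sp_offset;
	return true;
}
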
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
new file mode 100644
index 000000000000..5e02b11c9b86
--- /dev/null
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -0,0 +1,103 @@
+#ifndef _ASM_X86_UNWIND_HINTS_H
+#define _ASM_X86_UNWIND_HINTS_H
+
+#include "orc_types.h"
+
+#ifdef __ASSEMBLY__
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest. The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack. Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * category. In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer. Such
+ * code needs to be annotated such that objtool can understand it. The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction. Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary. It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+#ifdef CONFIG_STACK_VALIDATION
+.Lunwind_hint_ip_\@:
+ .pushsection .discard.unwind_hints
+ /* struct unwind_hint */
+ .long .Lunwind_hint_ip_\@ - .
+ .short \sp_offset
+ .byte \sp_reg
+ .byte \type
+ .popsection
+#endif
+.endm
+
+.macro UNWIND_HINT_EMPTY
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+.endm
+
+.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
+ .if \base == %rsp && \indirect
+ .set sp_reg, ORC_REG_SP_INDIRECT
+ .elseif \base == %rsp
+ .set sp_reg, ORC_REG_SP
+ .elseif \base == %rbp
+ .set sp_reg, ORC_REG_BP
+ .elseif \base == %rdi
+ .set sp_reg, ORC_REG_DI
+ .elseif \base == %rdx
+ .set sp_reg, ORC_REG_DX
+ .elseif \base == %r10
+ .set sp_reg, ORC_REG_R10
+ .else
+ .error "UNWIND_HINT_REGS: bad base register"
+ .endif
+
+ .set sp_offset, \offset
+
+ .if \iret
+ .set type, ORC_TYPE_REGS_IRET
+ .elseif \extra == 0
+ .set type, ORC_TYPE_REGS_IRET
+ .set sp_offset, \offset + (16*8)
+ .else
+ .set type, ORC_TYPE_REGS
+ .endif
+
+ UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
+.endm
+
+.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
+ UNWIND_HINT_REGS base=\base offset=\offset iret=1
+.endm
+
+.macro UNWIND_HINT_FUNC sp_offset=8
+ UNWIND_HINT sp_offset=\sp_offset
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT(sp_reg, sp_offset, type) \
+ "987: \n\t" \
+ ".pushsection .discard.unwind_hints\n\t" \
+ /* struct unwind_hint */ \
+ ".long 987b - .\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
+ ".byte " __stringify(sp_reg) "\n\t" \
+ ".byte " __stringify(type) "\n\t" \
+ ".popsection\n\t"
+
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_UNWIND_HINTS_H */
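
With CONFIG_STACK_VALIDATION enabled, each hint is only a small record in
the .discard.unwind_hints section, read by objtool at build time and
discarded at link time. As a rough illustration (the wrapper function is
hypothetical), the C-side UNWIND_HINT_SAVE above boils down to approximately
the following, since __stringify() reduces UNWIND_HINT_TYPE_SAVE to 3:

static inline void unwind_hint_save_expansion(void)	/* hypothetical */
{
	asm volatile("987:\n\t"
		     ".pushsection .discard.unwind_hints\n\t"
		     ".long 987b - .\n\t"	/* struct unwind_hint.ip (PC-relative) */
		     ".short 0\n\t"		/* .sp_offset */
		     ".byte 0\n\t"		/* .sp_reg    */
		     ".byte 3\n\t"		/* .type == UNWIND_HINT_TYPE_SAVE */
		     ".popsection\n\t");
}
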
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d75bdbf..4c9c61517613 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,6 +29,7 @@ OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
+OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y

# If instrumentation of this dir is enabled, boot hangs during first second.
# Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 26b78d86f25a..85a9e17e0dbc 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
+
obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
obj-$(CONFIG_ACPI_APEI) += apei.o
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index fa671b90c374..1808a9cc7701 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -28,6 +28,7 @@
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
+#include <linux/frame.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -91,6 +92,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
}

asm (
+ "optprobe_template_func:\n"
".global optprobe_template_entry\n"
"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
@@ -128,7 +130,12 @@ asm (
" popf\n"
#endif
".global optprobe_template_end\n"
- "optprobe_template_end:\n");
+ "optprobe_template_end:\n"
+ ".type optprobe_template_func, @function\n"
+ ".size optprobe_template_func, .-optprobe_template_func\n");
+
+void optprobe_template_func(void);
+STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_MOVE_IDX \
((long)&optprobe_template_val - (long)&optprobe_template_entry)
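
STACK_FRAME_NON_STANDARD(), from <linux/frame.h>, tells objtool to skip
stack validation for the named function. Conceptually it just records the
function's address in the __func_stack_frame_non_standard section, which
objtool reads from the object file and which the kernel linker script throws
away (see the vmlinux.lds.S hunk below). A rough sketch of the idea, not the
kernel's exact macro:

/* Sketch only; the real macro lives in <linux/frame.h>. */
#define MY_STACK_FRAME_NON_STANDARD(func)				\
	static void *__attribute__((used,				\
		section("__func_stack_frame_non_standard")))		\
	__whitelist_##func = (void *)(func)
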
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 03f21dbfaa9d..4a12362a194a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/tboot.h>
#include <linux/delay.h>
+#include <linux/frame.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -127,6 +128,7 @@ void __noreturn machine_real_restart(unsigned int type)
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif
+STACK_FRAME_NON_STANDARD(machine_real_restart);

/*
* Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c7194e97c3d4..4ef267fb635a 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -353,6 +353,7 @@ SECTIONS
/DISCARD/ : {
*(.eh_frame)
*(__func_stack_frame_non_standard)
+ *(__unreachable)
}
}

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a27f9e442ffc..c4cd1280ac3e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
+#include <linux/frame.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -5111,6 +5112,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

mark_all_clean(svm->vmcb);
}
+STACK_FRAME_NON_STANDARD(svm_vcpu_run);

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2827a9622d97..4a66a620fc17 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
+#include <linux/frame.h>
#include <linux/nospec.h>
#include "kvm_cache_regs.h"
#include "x86.h"
@@ -8698,6 +8699,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
);
}
}
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);

static bool vmx_has_emulated_msr(int index)
{
@@ -9138,6 +9140,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
}
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);

static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index c81556409bbb..10ffa7e8519f 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -13,14 +13,14 @@
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
pushq %rbx
- pushq %rbp
+ pushq %r12
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
movl 4(%rdi), %ecx
movl 8(%rdi), %edx
movl 12(%rdi), %ebx
- movl 20(%rdi), %ebp
+ movl 20(%rdi), %r12d
movl 24(%rdi), %esi
movl 28(%rdi), %edi
1: \op
@@ -29,10 +29,10 @@ ENTRY(\op\()_safe_regs)
movl %ecx, 4(%r10)
movl %edx, 8(%r10)
movl %ebx, 12(%r10)
- movl %ebp, 20(%r10)
+ movl %r12d, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
- popq %rbp
+ popq %r12
popq %rbx
ret
3:
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 90568c33ddb0..fefb4b619598 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -1,4 +1,6 @@
#
# Arch-specific network modules
#
+OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
+
obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 066619b0700c..7a255022933e 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,4 +1,5 @@
OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y

obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c33623..05041871ac90 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
+
# __restore_processor_state() restores %gs after S3 resume and so should not
# itself be stack-protected
nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index e47e52787d32..4a54059f42ba 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,3 +1,6 @@
+OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
+
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_spinlock.o = -pg
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 081437b5f381..e3a3f5a64884 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,7 @@
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
+#include <asm/unwind_hints.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -1452,10 +1453,12 @@ static void __ref xen_setup_gdt(int cpu)
* GDT. The new GDT has __KERNEL_CS with CS.L = 1
* and we are jumping to reload it.
*/
- asm volatile ("pushq %0\n"
+ asm volatile (UNWIND_HINT_SAVE
+ "pushq %0\n"
"leaq 1f(%%rip),%0\n"
"pushq %0\n"
"lretq\n"
+ UNWIND_HINT_RESTORE
"1:\n"
: "=&r" (dummy) : "0" (__KERNEL_CS));

diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a1b1de17455c..2214b2f9c73c 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -203,6 +203,17 @@
#endif
#endif

+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_unreachable() ({ \
+ asm("1:\t\n" \
+ ".pushsection __unreachable, \"a\"\t\n" \
+ ".long 1b\t\n" \
+ ".popsection\t\n"); \
+})
+#else
+#define annotate_unreachable()
+#endif
+
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
@@ -212,7 +223,8 @@
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
-#define unreachable() __builtin_unreachable()
+#define unreachable() \
+ do { annotate_unreachable(); __builtin_unreachable(); } while (0)

/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
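
A typical use of unreachable() is after an instruction or call that can
never fall through, so gcc stops warning about missing return statements
and, with this change, objtool also gets an explicit marker in the
__unreachable section (discarded by the vmlinux.lds.S hunk above). A
hypothetical example, assuming the kernel's unreachable() as defined here:

#define MY_BUG()				\
do {						\
	asm volatile("ud2");			\
	unreachable();				\
} while (0)

int my_mode_to_shift(int mode)
{
	switch (mode) {
	case 0: return 0;
	case 1: return 12;
	case 2: return 21;
	}

	/*
	 * Not reached for valid input.  Without the unreachable() inside
	 * MY_BUG(), gcc would warn that control reaches the end of a
	 * non-void function.
	 */
	MY_BUG();
}
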
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 561675589511..f5ab72ebda11 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -38,6 +38,7 @@
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
+#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>
@@ -878,7 +879,7 @@ int kexec_load_disabled;
* only when panic_cpu holds the current CPU number; this is the only CPU
* which processes crash_kexec routines.
*/
-void __crash_kexec(struct pt_regs *regs)
+void __noclone __crash_kexec(struct pt_regs *regs)
{
/* Take the kexec_mutex here to prevent sys_kexec_load
* running on one cpu from replacing the crash kernel
@@ -900,6 +901,7 @@ void __crash_kexec(struct pt_regs *regs)
mutex_unlock(&kexec_mutex);
}
}
+STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index a2b3eb313a25..0b8cf31d8416 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000

+/* Supported VGICv3 address types */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+
+#define KVM_VGIC_V3_DIST_SIZE SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */

@@ -166,6 +173,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A

+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 3051f86a9b5f..702de7a2b024 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)

+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35ce379..0fb1326c3ea2 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -596,6 +596,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)

/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index a2ffec4139ad..81c02e198527 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
+#define KVM_SYNC_BPBC (1UL << 10)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -217,7 +218,9 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
};

diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f79669a38c0c..c278f276c9b3 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -12,7 +12,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */

/*
@@ -189,17 +189,32 @@

#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */

#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */

-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */

/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */

+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+
+
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
@@ -231,6 +246,7 @@
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
@@ -255,6 +271,10 @@
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -290,6 +310,16 @@
#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */

+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
/*
* BUG word(s)
*/
@@ -314,4 +344,10 @@
#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 85599ad4d024..1f8cca459c6c 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID 0
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
+# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 0
+#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
@@ -57,6 +59,7 @@
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
#define DISABLED_MASK17 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index fac9a5c0abe9..6847d85400a8 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -100,6 +100,7 @@
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 653d1bad77de..0304600121da 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -13,6 +13,7 @@
*/

#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
diff --git a/tools/include/asm-generic/bitops/__ffz.h b/tools/include/asm-generic/bitops/__ffz.h
new file mode 100644
index 000000000000..6744bd4cdf46
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
index 31f51547fcd4..5538ecdc964a 100644
--- a/tools/include/asm-generic/bitops/find.h
+++ b/tools/include/asm-generic/bitops/find.h
@@ -15,6 +15,21 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
#endif

+#ifndef find_next_zero_bit
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
+#endif
+
#ifndef find_first_bit

/**
@@ -30,4 +45,17 @@ extern unsigned long find_first_bit(const unsigned long *addr,

#endif /* find_first_bit */

+#ifndef find_first_zero_bit
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+#endif
+
#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 4e3d3d18ebab..9f21fc2b092b 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -3,4 +3,10 @@

#include <asm/atomic.h>

+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+#endif /* atomic_cmpxchg_relaxed */
+
#endif /* __TOOLS_LINUX_ATOMIC_H */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 43c1c5021e4b..eef41d500e9e 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -35,6 +35,32 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
}
}

+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+{
+ unsigned int nlongs = BITS_TO_LONGS(nbits);
+ if (!small_const_nbits(nbits)) {
+ unsigned int len = (nlongs - 1) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+ return find_first_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+
+ return find_first_zero_bit(src, nbits) == nbits;
+}
+
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
if (small_const_nbits(nbits))
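
A small usage sketch for the helpers added above, written as user-space
tools code; it assumes DECLARE_BITMAP from the tools headers and that
tools/lib/find_bit.c is linked in for find_first_bit()/find_first_zero_bit():

#include <assert.h>
#include <linux/bitmap.h>	/* the tools copy */

static void bitmap_example(void)
{
	DECLARE_BITMAP(mask, 64);

	bitmap_zero(mask, 64);
	assert(bitmap_empty(mask, 64));

	bitmap_fill(mask, 64);
	assert(bitmap_full(mask, 64));
}
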
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 49c929a104ee..fc446343ff41 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -39,6 +39,11 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))

+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
diff --git a/tools/include/linux/bug.h b/tools/include/linux/bug.h
new file mode 100644
index 000000000000..8e4a4f49135d
--- /dev/null
+++ b/tools/include/linux/bug.h
@@ -0,0 +1,10 @@
+#ifndef _TOOLS_PERF_LINUX_BUG_H
+#define _TOOLS_PERF_LINUX_BUG_H
+
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or where-ever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#endif /* _TOOLS_PERF_LINUX_BUG_H */
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
new file mode 100644
index 000000000000..825d44f89a29
--- /dev/null
+++ b/tools/include/linux/compiler-gcc.h
@@ -0,0 +1,21 @@
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+
+#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
+# define __fallthrough __attribute__ ((fallthrough))
+#endif
+
+#if GCC_VERSION >= 40300
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* GCC_VERSION >= 40300 */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index d94179f94caa..23299d7e7160 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -1,6 +1,14 @@
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
@@ -9,6 +17,11 @@
# define __always_inline inline __attribute__((always_inline))
#endif

+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
#ifdef __ANDROID__
/*
* FIXME: Big hammer to get rid of tons of:
@@ -21,6 +34,8 @@
#endif

#define __user
+#define __rcu
+#define __read_mostly

#ifndef __attribute_const__
# define __attribute_const__
@@ -50,6 +65,8 @@
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif

+#define uninitialized_var(x) x = *(&(x))
+
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#include <linux/types.h>
@@ -128,11 +145,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s


#ifndef __fallthrough
-# if defined(__GNUC__) && __GNUC__ >= 7
-# define __fallthrough __attribute__ ((fallthrough))
-# else
-# define __fallthrough
-# endif
+# define __fallthrough
#endif

#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/hashtable.h b/tools/include/linux/hashtable.h
index c65cc0aa2659..251eabf2a05e 100644
--- a/tools/include/linux/hashtable.h
+++ b/tools/include/linux/hashtable.h
@@ -13,10 +13,6 @@
#include <linux/hash.h>
#include <linux/log2.h>

-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 28607db02bd3..73ccc48126bb 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -4,6 +4,11 @@
#include <stdarg.h>
#include <stddef.h>
#include <assert.h>
+#include <linux/compiler.h>
+
+#ifndef UINT_MAX
+#define UINT_MAX (~0U)
+#endif

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

@@ -72,6 +77,8 @@
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);

+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
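
This stricter ARRAY_SIZE() combines BUILD_BUG_ON_ZERO() from the new tools
bug.h with __must_be_array() from the new tools compiler-gcc.h, so passing a
pointer becomes a build error instead of a silently wrong answer. A
hypothetical illustration:

#include <linux/bug.h>		/* tools copies */
#include <linux/kernel.h>

static void array_size_example(void)
{
	int codes[8];
	int *p = codes;

	(void)ARRAY_SIZE(codes);	/* OK: evaluates to 8 */
	(void)p;
	/*
	 * ARRAY_SIZE(p) now fails to compile: p and &p[0] have the same
	 * type, so __must_be_array(p) triggers BUILD_BUG_ON_ZERO().  The
	 * old definition would have quietly returned
	 * sizeof(int *)/sizeof(int).
	 */
}
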
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index d5677d39c1e4..0325cefc2220 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -12,6 +12,9 @@
#ifndef _TOOLS_LINUX_LOG2_H
#define _TOOLS_LINUX_LOG2_H

+#include <linux/bitops.h>
+#include <linux/types.h>
+
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h
new file mode 100644
index 000000000000..a0177c1f55b1
--- /dev/null
+++ b/tools/include/linux/refcount.h
@@ -0,0 +1,151 @@
+#ifndef _TOOLS_LINUX_REFCOUNT_H
+#define _TOOLS_LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire, for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+
+#ifdef NDEBUG
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#else
+#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
+#define __refcount_check __must_check
+#endif
+
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+ atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+ return atomic_read(&r->refs);
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ new = val + 1;
+
+ if (!val)
+ return false;
+
+ if (unlikely(!new))
+ return true;
+
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+ REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return false;
+
+ new = val - i;
+ if (new > val) {
+ REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return false;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_sub_and_test(1, r);
+}
+
+
+#endif /* _ATOMIC_LINUX_REFCOUNT_H */
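
The intended use is the usual get/put pattern; here is a user-space sketch
against the API defined above (struct object and its helpers are made up for
illustration, and <linux/refcount.h> is the tools copy just added):

#include <stdlib.h>
#include <linux/refcount.h>

struct object {
	refcount_t refs;
	/* ... payload ... */
};

static struct object *object_alloc(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		refcount_set(&obj->refs, 1);
	return obj;
}

static void object_get(struct object *obj)
{
	refcount_inc(&obj->refs);
}

static void object_put(struct object *obj)
{
	if (refcount_dec_and_test(&obj->refs))
		free(obj);
}
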
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
new file mode 100644
index 000000000000..58397dcb19d6
--- /dev/null
+++ b/tools/include/linux/spinlock.h
@@ -0,0 +1,5 @@
+#define spinlock_t pthread_mutex_t
+#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+
+#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
+#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 8ebf6278b2ef..77a28a26a670 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -7,6 +7,7 @@

#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <asm/types.h>
+#include <asm/posix_types.h>

struct page;
struct kmem_cache;
@@ -42,11 +43,7 @@ typedef __s8 s8;
#else
#define __bitwise__
#endif
-#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif

#define __force
#define __user
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 58274382a616..8c27db0c5c08 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f

+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
#endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9e5fc168c8a3..f09c70b97eca 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -95,6 +95,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
+ BPF_PROG_TYPE_PERF_EVENT,
};

#define BPF_PSEUDO_MAP_FD 1
@@ -375,6 +376,56 @@ enum bpf_func_id {
*/
BPF_FUNC_probe_write_user,

+ /**
+ * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_current_task_under_cgroup,
+
+ /**
+ * bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size,
+ * to be used f.e. with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_tail,
+
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len are part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};

diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
new file mode 100644
index 000000000000..beed138bd359
--- /dev/null
+++ b/tools/include/uapi/linux/fcntl.h
@@ -0,0 +1,67 @@
+#ifndef _UAPI_LINUX_FCNTL_H
+#define _UAPI_LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
+#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
+
+/*
+ * Cancel a blocking posix lock; internal use only until we expose an
+ * asynchronous lock api to userspace:
+ */
+#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5)
+
+/* Create a file descriptor with FD_CLOEXEC set. */
+#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
+
+/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
+
+/*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
+
+/*
+ * Set/Get seals
+ */
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+
+/*
+ * Types of seals
+ */
+#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
+#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
+#define F_SEAL_GROW 0x0004 /* prevent file from growing */
+#define F_SEAL_WRITE 0x0008 /* prevent writes */
+/* (1U << 31) is reserved for signed error codes */
+
+/*
+ * Types of directory notifications that may be requested.
+ */
+#define DN_ACCESS 0x00000001 /* File accessed */
+#define DN_MODIFY 0x00000002 /* File modified */
+#define DN_CREATE 0x00000004 /* File created */
+#define DN_DELETE 0x00000008 /* File removed */
+#define DN_RENAME 0x00000010 /* File renamed */
+#define DN_ATTRIB 0x00000020 /* File changed attributes */
+#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+
+#define AT_FDCWD -100 /* Special value used to indicate
+ openat should use the current
+ working directory. */
+#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+#define AT_REMOVEDIR 0x200 /* Remove directory instead of
+ unlinking file. */
+#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
+#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
+
+
+#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
new file mode 100644
index 000000000000..7fec7e36d921
--- /dev/null
+++ b/tools/include/uapi/linux/stat.h
@@ -0,0 +1,45 @@
+#ifndef _UAPI_LINUX_STAT_H
+#define _UAPI_LINUX_STAT_H
+
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#endif
+
+
+#endif /* _UAPI_LINUX_STAT_H */
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 9122a9e80046..6d8b8f22cf55 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -82,3 +82,28 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
return size;
}
#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+ if (addr[idx] != ~0UL)
+ return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
+ }
+
+ return size;
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr, size, offset, ~0UL);
+}
+#endif
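
The new find_first_zero_bit() leans on ffz(x) being __ffs(~x). As a sanity
model only, an equivalent standalone version, using a gcc builtin in place
of the tools helpers, would be:

#include <limits.h>

static unsigned long model_find_first_zero_bit(const unsigned long *addr,
					       unsigned long size)
{
	const unsigned long bpl = sizeof(unsigned long) * CHAR_BIT;
	unsigned long idx;

	for (idx = 0; idx * bpl < size; idx++) {
		if (addr[idx] != ~0UL) {
			unsigned long bit = idx * bpl +
				__builtin_ctzl(~addr[idx]);
			return bit < size ? bit : size;
		}
	}
	return size;
}
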
diff --git a/tools/objtool/Build b/tools/objtool/Build
index d6cdece5e58b..749becdf5b90 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -1,5 +1,9 @@
objtool-y += arch/$(SRCARCH)/
objtool-y += builtin-check.o
+objtool-y += builtin-orc.o
+objtool-y += check.o
+objtool-y += orc_gen.o
+objtool-y += orc_dump.o
objtool-y += elf.o
objtool-y += special.o
objtool-y += objtool.o
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 55a60d331f47..3995735a878f 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -11,9 +11,6 @@ analyzes every .o file and ensures the validity of its stack metadata.
It enforces a set of rules on asm code and C inline assembly code so
that stack traces can be reliable.

-Currently it only checks frame pointer usage, but there are plans to add
-CFI validation for C files and CFI generation for asm files.
-
For each function, it recursively follows all possible code paths and
validates the correct frame pointer state at each instruction.

@@ -23,6 +20,10 @@ alternative execution paths to a given instruction (or set of
instructions). Similarly, it knows how to follow switch statements, for
which gcc sometimes uses jump tables.

+(Objtool also has an 'orc generate' subcommand which generates debuginfo
+for the ORC unwinder. See Documentation/x86/orc-unwinder.txt in the
+kernel tree for more details.)
+

Why do we need stack metadata validation?
-----------------------------------------
@@ -93,62 +94,24 @@ a) More reliable stack traces for frame pointer enabled kernels
or at the very end of the function after the stack frame has been
destroyed. This is an inherent limitation of frame pointers.

-b) 100% reliable stack traces for DWARF enabled kernels
-
- (NOTE: This is not yet implemented)
-
- As an alternative to frame pointers, DWARF Call Frame Information
- (CFI) metadata can be used to walk the stack. Unlike frame pointers,
- CFI metadata is out of band. So it doesn't affect runtime
- performance and it can be reliable even when interrupts or exceptions
- are involved.
-
- For C code, gcc automatically generates DWARF CFI metadata. But for
- asm code, generating CFI is a tedious manual approach which requires
- manually placed .cfi assembler macros to be scattered throughout the
- code. It's clumsy and very easy to get wrong, and it makes the real
- code harder to read.
-
- Stacktool will improve this situation in several ways. For code
- which already has CFI annotations, it will validate them. For code
- which doesn't have CFI annotations, it will generate them. So an
- architecture can opt to strip out all the manual .cfi annotations
- from their asm code and have objtool generate them instead.
+b) ORC (Oops Rewind Capability) unwind table generation

- We might also add a runtime stack validation debug option where we
- periodically walk the stack from schedule() and/or an NMI to ensure
- that the stack metadata is sane and that we reach the bottom of the
- stack.
+ An alternative to frame pointers and DWARF, ORC unwind data can be
+ used to walk the stack. Unlike frame pointers, ORC data is out of
+ band. So it doesn't affect runtime performance and it can be
+ reliable even when interrupts or exceptions are involved.

- So the benefit of objtool here will be that external tooling should
- always show perfect stack traces. And the same will be true for
- kernel warning/oops traces if the architecture has a runtime DWARF
- unwinder.
+ For more details, see Documentation/x86/orc-unwinder.txt.

c) Higher live patching compatibility rate

- (NOTE: This is not yet implemented)
-
- Currently with CONFIG_LIVEPATCH there's a basic live patching
- framework which is safe for roughly 85-90% of "security" fixes. But
- patches can't have complex features like function dependency or
- prototype changes, or data structure changes.
-
- There's a strong need to support patches which have the more complex
- features so that the patch compatibility rate for security fixes can
- eventually approach something resembling 100%. To achieve that, a
- "consistency model" is needed, which allows tasks to be safely
- transitioned from an unpatched state to a patched state.
-
- One of the key requirements of the currently proposed livepatch
- consistency model [*] is that it needs to walk the stack of each
- sleeping task to determine if it can be transitioned to the patched
- state. If objtool can ensure that stack traces are reliable, this
- consistency model can be used and the live patching compatibility
- rate can be improved significantly.
-
- [*] https://lkml.kernel.org/r/cover.1423499826.git.jpoimboe@xxxxxxxxxx
+ Livepatch has an optional "consistency model", which is needed for
+ more complex patches. In order for the consistency model to work,
+ stack traces need to be reliable (or an unreliable condition needs to
+ be detectable). Objtool makes that possible.

+ For more details, see the livepatch documentation in the Linux kernel
+ source tree at Documentation/livepatch/livepatch.txt.

Rules
-----
@@ -201,80 +164,84 @@ To achieve the validation, objtool enforces the following rules:
return normally.


-Errors in .S files
-------------------
+Objtool warnings
+----------------
+
+For asm files, if you're getting an error which doesn't make sense,
+first make sure that the affected code follows the above rules.

-If you're getting an error in a compiled .S file which you don't
-understand, first make sure that the affected code follows the above
-rules.
+For C files, the common culprits are inline asm statements and calls to
+"noreturn" functions. See below for more details.
+
+Another possible cause for errors in C code is if the Makefile removes
+-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.

Here are some examples of common warnings reported by objtool, what
they mean, and suggestions for how to fix them.


-1. asm_file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
+1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup

The func() function made a function call without first saving and/or
- updating the frame pointer.
-
- If func() is indeed a callable function, add proper frame pointer
- logic using the FRAME_BEGIN and FRAME_END macros. Otherwise, remove
- its ELF function annotation by changing ENDPROC to END.
-
- If you're getting this error in a .c file, see the "Errors in .c
- files" section.
+ updating the frame pointer, and CONFIG_FRAME_POINTER is enabled.

+ If the error is for an asm file, and func() is indeed a callable
+ function, add proper frame pointer logic using the FRAME_BEGIN and
+ FRAME_END macros. Otherwise, if it's not a callable function, remove
+ its ELF function annotation by changing ENDPROC to END, and instead
+ use the manual unwind hint macros in asm/unwind_hints.h.

-2. asm_file.o: warning: objtool: .text+0x53: return instruction outside of a callable function
+ If it's a GCC-compiled .c file, the error may be because the function
+ uses an inline asm() statement which has a "call" instruction. An
+ asm() statement with a call instruction must declare the use of the
+ stack pointer in its output operand. On x86_64, this means adding
+ the ASM_CALL_CONSTRAINT as an output constraint:

- A return instruction was detected, but objtool couldn't find a way
- for a callable function to reach the instruction.
+ asm volatile("call func" : ASM_CALL_CONSTRAINT);

- If the return instruction is inside (or reachable from) a callable
- function, the function needs to be annotated with the ENTRY/ENDPROC
- macros.
+ Otherwise the stack frame may not get created before the call.

- If you _really_ need a return instruction outside of a function, and
- are 100% sure that it won't affect stack traces, you can tell
- objtool to ignore it. See the "Adding exceptions" section below.

+2. file.o: warning: objtool: .text+0x53: unreachable instruction

-3. asm_file.o: warning: objtool: func()+0x9: function has unreachable instruction
+ Objtool couldn't find a code path to reach the instruction.

- The instruction lives inside of a callable function, but there's no
- possible control flow path from the beginning of the function to the
- instruction.
+ If the error is for an asm file, and the instruction is inside (or
+ reachable from) a callable function, the function should be annotated
+ with the ENTRY/ENDPROC macros (ENDPROC is the important one).
+ Otherwise, the code should probably be annotated with the unwind hint
+ macros in asm/unwind_hints.h so objtool and the unwinder can know the
+ stack state associated with the code.

- If the instruction is actually needed, and it's actually in a
- callable function, ensure that its function is properly annotated
- with ENTRY/ENDPROC.
+ If you're 100% sure the code won't affect stack traces, or if you're
+ just a bad person, you can tell objtool to ignore it. See the
+ "Adding exceptions" section below.

If it's not actually in a callable function (e.g. kernel entry code),
change ENDPROC to END.


-4. asm_file.o: warning: objtool: func(): can't find starting instruction
+4. file.o: warning: objtool: func(): can't find starting instruction
or
- asm_file.o: warning: objtool: func()+0x11dd: can't decode instruction
+ file.o: warning: objtool: func()+0x11dd: can't decode instruction

- Did you put data in a text section? If so, that can confuse
+ Does the file have data in a text section? If so, that can confuse
objtool's instruction decoder. Move the data to a more appropriate
section like .data or .rodata.


-5. asm_file.o: warning: objtool: func()+0x6: kernel entry/exit from callable instruction
-
- This is a kernel entry/exit instruction like sysenter or sysret.
- Such instructions aren't allowed in a callable function, and are most
- likely part of the kernel entry code.
+5. file.o: warning: objtool: func()+0x6: unsupported instruction in callable function

- If the instruction isn't actually in a callable function, change
- ENDPROC to END.
+ This is a kernel entry/exit instruction like sysenter or iret. Such
+ instructions aren't allowed in a callable function, and are most
+ likely part of the kernel entry code. They should usually not have
+ the callable function annotation (ENDPROC) and should always be
+ annotated with the unwind hint macros in asm/unwind_hints.h.


-6. asm_file.o: warning: objtool: func()+0x26: sibling call from callable instruction with changed frame pointer
+6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame

- This is a dynamic jump or a jump to an undefined symbol. Stacktool
+ This is a dynamic jump or a jump to an undefined symbol. Objtool
assumed it's a sibling call and detected that the frame pointer
wasn't first restored to its original state.

@@ -282,24 +249,28 @@ they mean, and suggestions for how to fix them.
destination code to the local file.

If the instruction is not actually in a callable function (e.g.
- kernel entry code), change ENDPROC to END.
+ kernel entry code), change ENDPROC to END and annotate manually with
+ the unwind hint macros in asm/unwind_hints.h.


-7. asm_file: warning: objtool: func()+0x5c: frame pointer state mismatch
+7. file: warning: objtool: func()+0x5c: stack state mismatch

The instruction's frame pointer state is inconsistent, depending on
which execution path was taken to reach the instruction.

- Make sure the function pushes and sets up the frame pointer (for
- x86_64, this means rbp) at the beginning of the function and pops it
- at the end of the function. Also make sure that no other code in the
- function touches the frame pointer.
+ Make sure that, when CONFIG_FRAME_POINTER is enabled, the function
+ pushes and sets up the frame pointer (for x86_64, this means rbp) at
+ the beginning of the function and pops it at the end of the function.
+ Also make sure that no other code in the function touches the frame
+ pointer.

+ Another possibility is that the code has some asm or inline asm which
+ does some unusual things to the stack or the frame pointer. In such
+ cases it's probably appropriate to use the unwind hint macros in
+ asm/unwind_hints.h.

-Errors in .c files
-------------------

-1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
+8. file.o: warning: objtool: funcA() falls through to next function funcB()

This means that funcA() doesn't end with a return instruction or an
unconditional jump, and that objtool has determined that the function
@@ -318,22 +289,6 @@ Errors in .c files
might be corrupt due to a gcc bug. For more details, see:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646

-2. If you're getting any other objtool error in a compiled .c file, it
- may be because the file uses an asm() statement which has a "call"
- instruction. An asm() statement with a call instruction must declare
- the use of the stack pointer in its output operand. For example, on
- x86_64:
-
- register void *__sp asm("rsp");
- asm volatile("call func" : "+r" (__sp));
-
- Otherwise the stack frame may not get created before the call.
-
-3. Another possible cause for errors in C code is if the Makefile removes
- -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
-
-Also see the above section for .S file errors for more information what
-the individual error messages mean.

If the error doesn't seem to make sense, it could be a bug in objtool.
Feel free to ask the objtool maintainer for help.
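
As a rough illustration of the inline-asm advice in warning 1 above, here is a
minimal, hedged C sketch. The function and called symbol are hypothetical, and
ASM_CALL_CONSTRAINT is assumed to come from asm/asm.h in kernels that carry
this series:

	/* Hypothetical example, kernel context assumed. */
	static inline void invoke_asm_helper(void)
	{
		/*
		 * Declaring the stack pointer as an output operand via
		 * ASM_CALL_CONSTRAINT tells GCC that the asm contains a
		 * call, so the stack frame is set up before the call is
		 * emitted and objtool sees a consistent frame state.
		 */
		asm volatile("call my_asm_helper"
			     : ASM_CALL_CONSTRAINT
			     : /* no inputs */
			     : "memory");
	}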
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 041b493ad3ab..e6acc281dd37 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
include ../scripts/Makefile.include
include ../scripts/Makefile.arch

@@ -6,17 +7,19 @@ ARCH := x86
endif

# always use the host compiler
-CC = gcc
-LD = ld
-AR = ar
+HOSTCC ?= gcc
+HOSTLD ?= ld
+CC = $(HOSTCC)
+LD = $(HOSTLD)
+AR = ar

ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif

SUBCMD_SRCDIR = $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT = $(if $(OUTPUT),$(OUTPUT),$(PWD)/)
+LIBSUBCMD_OUTPUT = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a

OBJTOOL := $(OUTPUT)objtool
@@ -24,8 +27,11 @@ OBJTOOL_IN := $(OBJTOOL)-in.o

all: $(OBJTOOL)

-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
-CFLAGS += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+INCLUDES := -I$(srctree)/tools/include \
+ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
+WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)

# Allow old libelf to be used:
@@ -39,19 +45,8 @@ include $(srctree)/tools/build/Makefile.include
$(OBJTOOL_IN): fixdep FORCE
@$(MAKE) $(build)=objtool

-# Busybox's diff doesn't have -I, avoid warning in that case
-#
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
- diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
+ @$(CONFIG_SHELL) ./sync-check.sh
$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@


@@ -61,7 +56,7 @@ $(LIBSUBCMD): fixdep FORCE
clean:
$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
- $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep

FORCE:

diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index f7350fcedc70..b0d7dc3d71b5 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -19,26 +19,64 @@
#define _ARCH_H

#include <stdbool.h>
+#include <linux/list.h>
#include "elf.h"
+#include "cfi.h"

-#define INSN_FP_SAVE 1
-#define INSN_FP_SETUP 2
-#define INSN_FP_RESTORE 3
-#define INSN_JUMP_CONDITIONAL 4
-#define INSN_JUMP_UNCONDITIONAL 5
-#define INSN_JUMP_DYNAMIC 6
-#define INSN_CALL 7
-#define INSN_CALL_DYNAMIC 8
-#define INSN_RETURN 9
-#define INSN_CONTEXT_SWITCH 10
-#define INSN_BUG 11
-#define INSN_NOP 12
-#define INSN_OTHER 13
+#define INSN_JUMP_CONDITIONAL 1
+#define INSN_JUMP_UNCONDITIONAL 2
+#define INSN_JUMP_DYNAMIC 3
+#define INSN_CALL 4
+#define INSN_CALL_DYNAMIC 5
+#define INSN_RETURN 6
+#define INSN_CONTEXT_SWITCH 7
+#define INSN_STACK 8
+#define INSN_BUG 9
+#define INSN_NOP 10
+#define INSN_OTHER 11
#define INSN_LAST INSN_OTHER

+enum op_dest_type {
+ OP_DEST_REG,
+ OP_DEST_REG_INDIRECT,
+ OP_DEST_MEM,
+ OP_DEST_PUSH,
+ OP_DEST_LEAVE,
+};
+
+struct op_dest {
+ enum op_dest_type type;
+ unsigned char reg;
+ int offset;
+};
+
+enum op_src_type {
+ OP_SRC_REG,
+ OP_SRC_REG_INDIRECT,
+ OP_SRC_CONST,
+ OP_SRC_POP,
+ OP_SRC_ADD,
+ OP_SRC_AND,
+};
+
+struct op_src {
+ enum op_src_type type;
+ unsigned char reg;
+ int offset;
+};
+
+struct stack_op {
+ struct op_dest dest;
+ struct op_src src;
+};
+
+void arch_initial_func_cfi_state(struct cfi_state *state);
+
int arch_decode_instruction(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int maxlen,
unsigned int *len, unsigned char *type,
- unsigned long *displacement);
+ unsigned long *immediate, struct stack_op *op);
+
+bool arch_callee_saved_reg(unsigned char reg);

#endif /* _ARCH_H */
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index debbdb0b5c43..b998412c017d 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
objtool-y += decode.o

-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt

-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@

-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c

-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 039636ffb6c8..540a209b78ab 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,14 +19,25 @@
#include <stdlib.h>

#define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"

#include "../../elf.h"
#include "../../arch.h"
#include "../../warn.h"

+static unsigned char op_to_cfi_reg[][2] = {
+ {CFI_AX, CFI_R8},
+ {CFI_CX, CFI_R9},
+ {CFI_DX, CFI_R10},
+ {CFI_BX, CFI_R11},
+ {CFI_SP, CFI_R12},
+ {CFI_BP, CFI_R13},
+ {CFI_SI, CFI_R14},
+ {CFI_DI, CFI_R15},
+};
+
static int is_x86_64(struct elf *elf)
{
switch (elf->ehdr.e_machine) {
@@ -40,24 +51,50 @@ static int is_x86_64(struct elf *elf)
}
}

+bool arch_callee_saved_reg(unsigned char reg)
+{
+ switch (reg) {
+ case CFI_BP:
+ case CFI_BX:
+ case CFI_R12:
+ case CFI_R13:
+ case CFI_R14:
+ case CFI_R15:
+ return true;
+
+ case CFI_AX:
+ case CFI_CX:
+ case CFI_DX:
+ case CFI_SI:
+ case CFI_DI:
+ case CFI_SP:
+ case CFI_R8:
+ case CFI_R9:
+ case CFI_R10:
+ case CFI_R11:
+ case CFI_RA:
+ default:
+ return false;
+ }
+}
+
int arch_decode_instruction(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int maxlen,
unsigned int *len, unsigned char *type,
- unsigned long *immediate)
+ unsigned long *immediate, struct stack_op *op)
{
struct insn insn;
- int x86_64;
- unsigned char op1, op2, ext;
+ int x86_64, sign;
+ unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
+ rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
+ modrm_reg = 0, sib = 0;

x86_64 = is_x86_64(elf);
if (x86_64 == -1)
return -1;

- insn_init(&insn, (void *)(sec->data + offset), maxlen, x86_64);
+ insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
insn_get_length(&insn);
- insn_get_opcode(&insn);
- insn_get_modrm(&insn);
- insn_get_immediate(&insn);

if (!insn_complete(&insn)) {
WARN_FUNC("can't decode instruction", sec, offset);
@@ -73,70 +110,317 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
op1 = insn.opcode.bytes[0];
op2 = insn.opcode.bytes[1];

+ if (insn.rex_prefix.nbytes) {
+ rex = insn.rex_prefix.bytes[0];
+ rex_w = X86_REX_W(rex) >> 3;
+ rex_r = X86_REX_R(rex) >> 2;
+ rex_x = X86_REX_X(rex) >> 1;
+ rex_b = X86_REX_B(rex);
+ }
+
+ if (insn.modrm.nbytes) {
+ modrm = insn.modrm.bytes[0];
+ modrm_mod = X86_MODRM_MOD(modrm);
+ modrm_reg = X86_MODRM_REG(modrm);
+ modrm_rm = X86_MODRM_RM(modrm);
+ }
+
+ if (insn.sib.nbytes)
+ sib = insn.sib.bytes[0];
+
switch (op1) {
- case 0x55:
- if (!insn.rex_prefix.nbytes)
- /* push rbp */
- *type = INSN_FP_SAVE;
+
+ case 0x1:
+ case 0x29:
+ if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+ /* add/sub reg, %rsp */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ break;
+
+ case 0x50 ... 0x57:
+
+ /* push reg */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG;
+ op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+ op->dest.type = OP_DEST_PUSH;
+
+ break;
+
+ case 0x58 ... 0x5f:
+
+ /* pop reg */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+
break;

- case 0x5d:
- if (!insn.rex_prefix.nbytes)
- /* pop rbp */
- *type = INSN_FP_RESTORE;
+ case 0x68:
+ case 0x6a:
+ /* push immediate */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
break;

case 0x70 ... 0x7f:
*type = INSN_JUMP_CONDITIONAL;
break;

+ case 0x81:
+ case 0x83:
+ if (rex != 0x48)
+ break;
+
+ if (modrm == 0xe4) {
+ /* and imm, %rsp */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_AND;
+ op->src.reg = CFI_SP;
+ op->src.offset = insn.immediate.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ break;
+ }
+
+ if (modrm == 0xc4)
+ sign = 1;
+ else if (modrm == 0xec)
+ sign = -1;
+ else
+ break;
+
+ /* add/sub imm, %rsp */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_SP;
+ op->src.offset = insn.immediate.value * sign;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ break;
+
case 0x89:
- if (insn.rex_prefix.nbytes == 1 &&
- insn.rex_prefix.bytes[0] == 0x48 &&
- insn.modrm.nbytes && insn.modrm.bytes[0] == 0xe5)
- /* mov rsp, rbp */
- *type = INSN_FP_SETUP;
+ if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
+
+ /* mov %rsp, reg */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG;
+ op->src.reg = CFI_SP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
+ break;
+ }
+
+ if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+ /* mov reg, %rsp */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG;
+ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ break;
+ }
+
+ /* fallthrough */
+ case 0x88:
+ if (!rex_b &&
+ (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+
+ /* mov reg, disp(%rbp) */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG;
+ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_BP;
+ op->dest.offset = insn.displacement.value;
+
+ } else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+
+ /* mov reg, disp(%rsp) */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG;
+ op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_SP;
+ op->dest.offset = insn.displacement.value;
+ }
+
+ break;
+
+ case 0x8b:
+ if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+
+ /* mov disp(%rbp), reg */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_BP;
+ op->src.offset = insn.displacement.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+ } else if (rex_w && !rex_b && sib == 0x24 &&
+ modrm_mod != 3 && modrm_rm == 4) {
+
+ /* mov disp(%rsp), reg */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_SP;
+ op->src.offset = insn.displacement.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ }
+
break;

case 0x8d:
- if (insn.rex_prefix.nbytes &&
- insn.rex_prefix.bytes[0] == 0x48 &&
- insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
- insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
- /* lea %(rsp), %rbp */
- *type = INSN_FP_SETUP;
+ if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
+
+ *type = INSN_STACK;
+ if (!insn.displacement.value) {
+ /* lea (%rsp), reg */
+ op->src.type = OP_SRC_REG;
+ } else {
+ /* lea disp(%rsp), reg */
+ op->src.type = OP_SRC_ADD;
+ op->src.offset = insn.displacement.value;
+ }
+ op->src.reg = CFI_SP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+ } else if (rex == 0x48 && modrm == 0x65) {
+
+ /* lea disp(%rbp), %rsp */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_BP;
+ op->src.offset = insn.displacement.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+
+ } else if (rex == 0x49 && modrm == 0x62 &&
+ insn.displacement.value == -8) {
+
+ /*
+ * lea -0x8(%r10), %rsp
+ *
+ * Restoring rsp back to its original value after a
+ * stack realignment.
+ */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_R10;
+ op->src.offset = -8;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+
+ } else if (rex == 0x49 && modrm == 0x65 &&
+ insn.displacement.value == -16) {
+
+ /*
+ * lea -0x10(%r13), %rsp
+ *
+ * Restoring rsp back to its original value after a
+ * stack realignment.
+ */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_R13;
+ op->src.offset = -16;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+
+ break;
+
+ case 0x8f:
+ /* pop to mem */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_MEM;
break;

case 0x90:
*type = INSN_NOP;
break;

+ case 0x9c:
+ /* pushf */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ break;
+
+ case 0x9d:
+ /* popf */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_MEM;
+ break;
+
case 0x0f:
- if (op2 >= 0x80 && op2 <= 0x8f)
+
+ if (op2 >= 0x80 && op2 <= 0x8f) {
+
*type = INSN_JUMP_CONDITIONAL;
- else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
- op2 == 0x35)
+
+ } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
+ op2 == 0x35) {
+
/* sysenter, sysret */
*type = INSN_CONTEXT_SWITCH;
- else if (op2 == 0x0b || op2 == 0xb9)
+
+ } else if (op2 == 0x0b || op2 == 0xb9) {
+
/* ud2 */
*type = INSN_BUG;
- else if (op2 == 0x0d || op2 == 0x1f)
+
+ } else if (op2 == 0x0d || op2 == 0x1f) {
+
/* nopl/nopw */
*type = INSN_NOP;
- else if (op2 == 0x01 && insn.modrm.nbytes &&
- (insn.modrm.bytes[0] == 0xc2 ||
- insn.modrm.bytes[0] == 0xd8))
- /* vmlaunch, vmrun */
- *type = INSN_CONTEXT_SWITCH;
+
+ } else if (op2 == 0xa0 || op2 == 0xa8) {
+
+ /* push fs/gs */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+
+ } else if (op2 == 0xa1 || op2 == 0xa9) {
+
+ /* pop fs/gs */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_MEM;
+ }

break;

- case 0xc9: /* leave */
- *type = INSN_FP_RESTORE;
+ case 0xc9:
+ /*
+ * leave
+ *
+ * equivalent to:
+ * mov bp, sp
+ * pop bp
+ */
+ *type = INSN_STACK;
+ op->dest.type = OP_DEST_LEAVE;
+
break;

- case 0xe3: /* jecxz/jrcxz */
+ case 0xe3:
+ /* jecxz/jrcxz */
*type = INSN_JUMP_CONDITIONAL;
break;

@@ -161,14 +445,27 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
break;

case 0xff:
- ext = X86_MODRM_REG(insn.modrm.bytes[0]);
- if (ext == 2 || ext == 3)
+ if (modrm_reg == 2 || modrm_reg == 3)
+
*type = INSN_CALL_DYNAMIC;
- else if (ext == 4)
+
+ else if (modrm_reg == 4)
+
*type = INSN_JUMP_DYNAMIC;
- else if (ext == 5) /*jmpf */
+
+ else if (modrm_reg == 5)
+
+ /* jmpf */
*type = INSN_CONTEXT_SWITCH;

+ else if (modrm_reg == 6) {
+
+ /* push from mem */
+ *type = INSN_STACK;
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ }
+
break;

default:
@@ -179,3 +476,21 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,

return 0;
}
+
+void arch_initial_func_cfi_state(struct cfi_state *state)
+{
+ int i;
+
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ state->regs[i].base = CFI_UNDEFINED;
+ state->regs[i].offset = 0;
+ }
+
+ /* initial CFA (call frame address) */
+ state->cfa.base = CFI_SP;
+ state->cfa.offset = 8;
+
+ /* initial RA (return address) */
+ state->regs[16].base = CFI_CFA;
+ state->regs[16].offset = -8;
+}
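
For orientation, here is a hedged sketch of what the new decode logic above
produces for the simplest case, a plain "push %rbp" (opcode 0x55, no REX
prefix). It assumes objtool's arch.h and cfi.h definitions from this patch:

	/* Illustrative only: the stack_op that case 0x50 ... 0x57 above
	 * fills in for "push %rbp" (op_to_cfi_reg[5][0] == CFI_BP),
	 * together with *type == INSN_STACK.
	 */
	struct stack_op push_rbp_op = {
		.src  = { .type = OP_SRC_REG,  .reg = CFI_BP },
		.dest = { .type = OP_DEST_PUSH },
	};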
diff --git a/tools/objtool/arch/x86/include/asm/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
new file mode 100644
index 000000000000..02aff0867211
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -0,0 +1,234 @@
+#ifndef _ASM_X86_INAT_H
+#define _ASM_X86_INAT_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/inat_types.h>
+
+/*
+ * Internal bits. Don't use bitmasks directly, because these bits are
+ * unstable. You should use checking functions.
+ */
+
+#define INAT_OPCODE_TABLE_SIZE 256
+#define INAT_GROUP_TABLE_SIZE 8
+
+/* Legacy last prefixes */
+#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
+#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
+#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
+/* Other Legacy prefixes */
+#define INAT_PFX_LOCK 4 /* 0xF0 */
+#define INAT_PFX_CS 5 /* 0x2E */
+#define INAT_PFX_DS 6 /* 0x3E */
+#define INAT_PFX_ES 7 /* 0x26 */
+#define INAT_PFX_FS 8 /* 0x64 */
+#define INAT_PFX_GS 9 /* 0x65 */
+#define INAT_PFX_SS 10 /* 0x36 */
+#define INAT_PFX_ADDRSZ 11 /* 0x67 */
+/* x86-64 REX prefix */
+#define INAT_PFX_REX 12 /* 0x4X */
+/* AVX VEX prefixes */
+#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
+#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+#define INAT_PFX_EVEX 15 /* EVEX prefix */
+
+#define INAT_LSTPFX_MAX 3
+#define INAT_LGCPFX_MAX 11
+
+/* Immediate size */
+#define INAT_IMM_BYTE 1
+#define INAT_IMM_WORD 2
+#define INAT_IMM_DWORD 3
+#define INAT_IMM_QWORD 4
+#define INAT_IMM_PTR 5
+#define INAT_IMM_VWORD32 6
+#define INAT_IMM_VWORD 7
+
+/* Legacy prefix */
+#define INAT_PFX_OFFS 0
+#define INAT_PFX_BITS 4
+#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
+#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
+/* Escape opcodes */
+#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
+#define INAT_ESC_BITS 2
+#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
+#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
+/* Group opcodes (1-16) */
+#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
+#define INAT_GRP_BITS 5
+#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
+#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
+/* Immediates */
+#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
+#define INAT_IMM_BITS 3
+#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+/* Flags */
+#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
+#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
+#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
+#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
+#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
+#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
+#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
+#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
+/* Attribute making macros for attribute tables */
+#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
+#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
+#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
+#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+
+/* Attribute search APIs */
+extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+ insn_byte_t vex_m,
+ insn_byte_t vex_pp);
+
+/* Attribute checking functions */
+static inline int inat_is_legacy_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr && attr <= INAT_LGCPFX_MAX;
+}
+
+static inline int inat_is_address_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+}
+
+static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+}
+
+static inline int inat_is_rex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+}
+
+static inline int inat_last_prefix_id(insn_attr_t attr)
+{
+ if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+ return 0;
+ else
+ return attr & INAT_PFX_MASK;
+}
+
+static inline int inat_is_vex_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+ attr == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_evex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_vex3_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_escape(insn_attr_t attr)
+{
+ return attr & INAT_ESC_MASK;
+}
+
+static inline int inat_escape_id(insn_attr_t attr)
+{
+ return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+}
+
+static inline int inat_is_group(insn_attr_t attr)
+{
+ return attr & INAT_GRP_MASK;
+}
+
+static inline int inat_group_id(insn_attr_t attr)
+{
+ return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+}
+
+static inline int inat_group_common_attribute(insn_attr_t attr)
+{
+ return attr & ~INAT_GRP_MASK;
+}
+
+static inline int inat_has_immediate(insn_attr_t attr)
+{
+ return attr & INAT_IMM_MASK;
+}
+
+static inline int inat_immediate_size(insn_attr_t attr)
+{
+ return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+}
+
+static inline int inat_has_modrm(insn_attr_t attr)
+{
+ return attr & INAT_MODRM;
+}
+
+static inline int inat_is_force64(insn_attr_t attr)
+{
+ return attr & INAT_FORCE64;
+}
+
+static inline int inat_has_second_immediate(insn_attr_t attr)
+{
+ return attr & INAT_SCNDIMM;
+}
+
+static inline int inat_has_moffset(insn_attr_t attr)
+{
+ return attr & INAT_MOFFSET;
+}
+
+static inline int inat_has_variant(insn_attr_t attr)
+{
+ return attr & INAT_VARIANT;
+}
+
+static inline int inat_accept_vex(insn_attr_t attr)
+{
+ return attr & INAT_VEXOK;
+}
+
+static inline int inat_must_vex(insn_attr_t attr)
+{
+ return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+}
+
+static inline int inat_must_evex(insn_attr_t attr)
+{
+ return attr & INAT_EVEXONLY;
+}
+#endif
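
A hedged usage sketch for the attribute lookup API declared above, assuming
the generated inat-tables.c is linked in as the objtool build arranges:

	/* Illustrative only: check whether a single-byte opcode takes a ModRM
	 * byte.  Escape and group opcodes need the further table lookups that
	 * insn.c performs; this only consults the primary table.
	 */
	static int opcode_has_modrm(insn_byte_t opcode)
	{
		insn_attr_t attr = inat_get_opcode_attribute(opcode);

		return inat_has_modrm(attr) != 0;
	}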
diff --git a/tools/objtool/arch/x86/include/asm/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
new file mode 100644
index 000000000000..cb3c20ce39cf
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/inat_types.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_INAT_TYPES_H
+#define _ASM_X86_INAT_TYPES_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+/* Instruction attributes */
+typedef unsigned int insn_attr_t;
+typedef unsigned char insn_byte_t;
+typedef signed int insn_value_t;
+
+#endif
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
new file mode 100644
index 000000000000..b3e32b010ab1
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -0,0 +1,211 @@
+#ifndef _ASM_X86_INSN_H
+#define _ASM_X86_INSN_H
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/* insn_attr_t is defined in inat.h */
+#include <asm/inat.h>
+
+struct insn_field {
+ union {
+ insn_value_t value;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+struct insn {
+ struct insn_field prefixes; /*
+ * Prefixes
+ * prefixes.bytes[3]: last prefix
+ */
+ struct insn_field rex_prefix; /* REX prefix */
+ struct insn_field vex_prefix; /* VEX prefix */
+ struct insn_field opcode; /*
+ * opcode.bytes[0]: opcode1
+ * opcode.bytes[1]: opcode2
+ * opcode.bytes[2]: opcode3
+ */
+ struct insn_field modrm;
+ struct insn_field sib;
+ struct insn_field displacement;
+ union {
+ struct insn_field immediate;
+ struct insn_field moffset1; /* for 64bit MOV */
+ struct insn_field immediate1; /* for 64bit imm or off16/32 */
+ };
+ union {
+ struct insn_field moffset2; /* for 64bit MOV */
+ struct insn_field immediate2; /* for 64bit imm or seg16 */
+ };
+
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+ unsigned char length;
+ unsigned char x86_64;
+
+ const insn_byte_t *kaddr; /* kernel address of insn to analyze */
+ const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
+ const insn_byte_t *next_byte;
+};
+
+#define MAX_INSN_SIZE 15
+
+#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+
+#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+#define X86_SIB_BASE(sib) ((sib) & 0x07)
+
+#define X86_REX_W(rex) ((rex) & 8)
+#define X86_REX_R(rex) ((rex) & 4)
+#define X86_REX_X(rex) ((rex) & 2)
+#define X86_REX_B(rex) ((rex) & 1)
+
+/* VEX bit flags */
+#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
+#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
+#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
+#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
+#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
+/* VEX bit fields */
+#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
+#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
+#define X86_VEX2_M 1 /* VEX2.M always 1 */
+#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
+
+extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+extern void insn_get_prefixes(struct insn *insn);
+extern void insn_get_opcode(struct insn *insn);
+extern void insn_get_modrm(struct insn *insn);
+extern void insn_get_sib(struct insn *insn);
+extern void insn_get_displacement(struct insn *insn);
+extern void insn_get_immediate(struct insn *insn);
+extern void insn_get_length(struct insn *insn);
+
+/* Attribute will be determined after getting ModRM (for opcode groups) */
+static inline void insn_get_attribute(struct insn *insn)
+{
+ insn_get_modrm(insn);
+}
+
+/* Instruction uses RIP-relative addressing */
+extern int insn_rip_relative(struct insn *insn);
+
+/* Init insn for kernel text */
+static inline void kernel_insn_init(struct insn *insn,
+ const void *kaddr, int buf_len)
+{
+#ifdef CONFIG_X86_64
+ insn_init(insn, kaddr, buf_len, 1);
+#else /* CONFIG_X86_32 */
+ insn_init(insn, kaddr, buf_len, 0);
+#endif
+}
+
+static inline int insn_is_avx(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.value != 0);
+}
+
+static inline int insn_is_evex(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.nbytes == 4);
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX2_M;
+ else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
+ return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+ else /* EVEX */
+ return X86_EVEX_M(insn->vex_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX_P(insn->vex_prefix.bytes[1]);
+ else
+ return X86_VEX_P(insn->vex_prefix.bytes[2]);
+}
+
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+ if (insn_is_avx(insn))
+ return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+
+ if (insn->prefixes.bytes[3])
+ return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+ return 0;
+}
+
+/* Offset of each field from kaddr */
+static inline int insn_offset_rex_prefix(struct insn *insn)
+{
+ return insn->prefixes.nbytes;
+}
+static inline int insn_offset_vex_prefix(struct insn *insn)
+{
+ return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+}
+static inline int insn_offset_opcode(struct insn *insn)
+{
+ return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+}
+static inline int insn_offset_modrm(struct insn *insn)
+{
+ return insn_offset_opcode(insn) + insn->opcode.nbytes;
+}
+static inline int insn_offset_sib(struct insn *insn)
+{
+ return insn_offset_modrm(insn) + insn->modrm.nbytes;
+}
+static inline int insn_offset_displacement(struct insn *insn)
+{
+ return insn_offset_sib(insn) + insn->sib.nbytes;
+}
+static inline int insn_offset_immediate(struct insn *insn)
+{
+ return insn_offset_displacement(insn) + insn->displacement.nbytes;
+}
+
+#endif /* _ASM_X86_INSN_H */
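
For context, a hedged sketch of how this decoder is typically driven,
mirroring the arch_decode_instruction() changes earlier in this patch; the
buffer and length are hypothetical and the generated inat-tables.c must be
linked in:

	#include <asm/insn.h>

	/* Illustrative only: decode one instruction from a code buffer and
	 * return its first opcode byte, or -1 if the bytes can't be decoded.
	 */
	static int first_opcode_byte(const unsigned char *buf, int len)
	{
		struct insn insn;

		insn_init(&insn, buf, len, 1 /* x86_64 */);
		insn_get_length(&insn);	/* also decodes all earlier fields */

		if (!insn_complete(&insn))
			return -1;

		return insn.opcode.bytes[0];
	}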
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
new file mode 100644
index 000000000000..7dc777a6cb40
--- /dev/null
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_DX 2
+#define ORC_REG_DI 3
+#define ORC_REG_BP 4
+#define ORC_REG_SP 5
+#define ORC_REG_R10 6
+#define ORC_REG_R13 7
+#define ORC_REG_BP_INDIRECT 8
+#define ORC_REG_SP_INDIRECT 9
+#define ORC_REG_MAX 15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL 0
+#define ORC_TYPE_REGS 1
+#define ORC_TYPE_REGS_IRET 2
+#define UNWIND_HINT_TYPE_SAVE 3
+#define UNWIND_HINT_TYPE_RESTORE 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 bp_offset;
+ unsigned sp_reg:4;
+ unsigned bp_reg:4;
+ unsigned type:2;
+};
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
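
As a hedged, illustrative data point for the structure defined above: at the
first instruction of an ordinary C function only the return address is on the
stack, so the previous SP (the CFA) is found at SP+8 and BP is not yet
tracked. This mirrors arch_initial_func_cfi_state() earlier in this patch:

	/* Illustrative only: an ORC entry for a function's entry point. */
	struct orc_entry func_entry_orc = {
		.sp_reg    = ORC_REG_SP,
		.sp_offset = 8,
		.bp_reg    = ORC_REG_UNDEFINED,
		.bp_offset = 0,
		.type      = ORC_TYPE_CALL,
	};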
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
deleted file mode 100644
index a3d2c62fd805..000000000000
--- a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/bin/awk -f
-# gen-insn-attr-x86.awk: Instruction attribute table generator
-# Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
-#
-# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
-
-# Awk implementation sanity check
-function check_awk_implement() {
- if (sprintf("%x", 0) != "0")
- return "Your awk has a printf-format problem."
- return ""
-}
-
-# Clear working vars
-function clear_vars() {
- delete table
- delete lptable2
- delete lptable1
- delete lptable3
- eid = -1 # escape id
- gid = -1 # group id
- aid = -1 # AVX id
- tname = ""
-}
-
-BEGIN {
- # Implementation error checking
- awkchecked = check_awk_implement()
- if (awkchecked != "") {
- print "Error: " awkchecked > "/dev/stderr"
- print "Please try to use gawk." > "/dev/stderr"
- exit 1
- }
-
- # Setup generating tables
- print "/* x86 opcode map generated from x86-opcode-map.txt */"
- print "/* Do not change this code. */\n"
- ggid = 1
- geid = 1
- gaid = 0
- delete etable
- delete gtable
- delete atable
-
- opnd_expr = "^[A-Za-z/]"
- ext_expr = "^\\("
- sep_expr = "^\\|$"
- group_expr = "^Grp[0-9A-Za-z]+"
-
- imm_expr = "^[IJAOL][a-z]"
- imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
- imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
- imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
- imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
- imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
- imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
- imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
- imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
- imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
- imm_flag["Ob"] = "INAT_MOFFSET"
- imm_flag["Ov"] = "INAT_MOFFSET"
- imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
-
- modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
- force64_expr = "\\([df]64\\)"
- rex_expr = "^REX(\\.[XRWB]+)*"
- fpu_expr = "^ESC" # TODO
-
- lprefix1_expr = "\\((66|!F3)\\)"
- lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
- lprefix_expr = "\\((66|F2|F3)\\)"
- max_lprefix = 4
-
- # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
- # accepts VEX prefix
- vexok_opcode_expr = "^[vk].*"
- vexok_expr = "\\(v1\\)"
- # All opcodes with (v) superscript supports *only* VEX prefix
- vexonly_expr = "\\(v\\)"
- # All opcodes with (ev) superscript supports *only* EVEX prefix
- evexonly_expr = "\\(ev\\)"
-
- prefix_expr = "\\(Prefix\\)"
- prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
- prefix_num["REPNE"] = "INAT_PFX_REPNE"
- prefix_num["REP/REPE"] = "INAT_PFX_REPE"
- prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
- prefix_num["XRELEASE"] = "INAT_PFX_REPE"
- prefix_num["LOCK"] = "INAT_PFX_LOCK"
- prefix_num["SEG=CS"] = "INAT_PFX_CS"
- prefix_num["SEG=DS"] = "INAT_PFX_DS"
- prefix_num["SEG=ES"] = "INAT_PFX_ES"
- prefix_num["SEG=FS"] = "INAT_PFX_FS"
- prefix_num["SEG=GS"] = "INAT_PFX_GS"
- prefix_num["SEG=SS"] = "INAT_PFX_SS"
- prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
- prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
- prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
- prefix_num["EVEX"] = "INAT_PFX_EVEX"
-
- clear_vars()
-}
-
-function semantic_error(msg) {
- print "Semantic error at " NR ": " msg > "/dev/stderr"
- exit 1
-}
-
-function debug(msg) {
- print "DEBUG: " msg
-}
-
-function array_size(arr, i,c) {
- c = 0
- for (i in arr)
- c++
- return c
-}
-
-/^Table:/ {
- print "/* " $0 " */"
- if (tname != "")
- semantic_error("Hit Table: before EndTable:.");
-}
-
-/^Referrer:/ {
- if (NF != 1) {
- # escape opcode table
- ref = ""
- for (i = 2; i <= NF; i++)
- ref = ref $i
- eid = escape[ref]
- tname = sprintf("inat_escape_table_%d", eid)
- }
-}
-
-/^AVXcode:/ {
- if (NF != 1) {
- # AVX/escape opcode table
- aid = $2
- if (gaid <= aid)
- gaid = aid + 1
- if (tname == "") # AVX only opcode table
- tname = sprintf("inat_avx_table_%d", $2)
- }
- if (aid == -1 && eid == -1) # primary opcode table
- tname = "inat_primary_table"
-}
-
-/^GrpTable:/ {
- print "/* " $0 " */"
- if (!($2 in group))
- semantic_error("No group: " $2 )
- gid = group[$2]
- tname = "inat_group_table_" gid
-}
-
-function print_table(tbl,name,fmt,n)
-{
- print "const insn_attr_t " name " = {"
- for (i = 0; i < n; i++) {
- id = sprintf(fmt, i)
- if (tbl[id])
- print " [" id "] = " tbl[id] ","
- }
- print "};"
-}
-
-/^EndTable/ {
- if (gid != -1) {
- # print group tables
- if (array_size(table) != 0) {
- print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,0] = tname
- }
- if (array_size(lptable1) != 0) {
- print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,1] = tname "_1"
- }
- if (array_size(lptable2) != 0) {
- print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,2] = tname "_2"
- }
- if (array_size(lptable3) != 0) {
- print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,3] = tname "_3"
- }
- } else {
- # print primary/escaped tables
- if (array_size(table) != 0) {
- print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,0] = tname
- if (aid >= 0)
- atable[aid,0] = tname
- }
- if (array_size(lptable1) != 0) {
- print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,1] = tname "_1"
- if (aid >= 0)
- atable[aid,1] = tname "_1"
- }
- if (array_size(lptable2) != 0) {
- print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,2] = tname "_2"
- if (aid >= 0)
- atable[aid,2] = tname "_2"
- }
- if (array_size(lptable3) != 0) {
- print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,3] = tname "_3"
- if (aid >= 0)
- atable[aid,3] = tname "_3"
- }
- }
- print ""
- clear_vars()
-}
-
-function add_flags(old,new) {
- if (old && new)
- return old " | " new
- else if (old)
- return old
- else
- return new
-}
-
-# convert operands to flags.
-function convert_operands(count,opnd, i,j,imm,mod)
-{
- imm = null
- mod = null
- for (j = 1; j <= count; j++) {
- i = opnd[j]
- if (match(i, imm_expr) == 1) {
- if (!imm_flag[i])
- semantic_error("Unknown imm opnd: " i)
- if (imm) {
- if (i != "Ib")
- semantic_error("Second IMM error")
- imm = add_flags(imm, "INAT_SCNDIMM")
- } else
- imm = imm_flag[i]
- } else if (match(i, modrm_expr))
- mod = "INAT_MODRM"
- }
- return add_flags(imm, mod)
-}
-
-/^[0-9a-f]+\:/ {
- if (NR == 1)
- next
- # get index
- idx = "0x" substr($1, 1, index($1,":") - 1)
- if (idx in table)
- semantic_error("Redefine " idx " in " tname)
-
- # check if escaped opcode
- if ("escape" == $2) {
- if ($3 != "#")
- semantic_error("No escaped name")
- ref = ""
- for (i = 4; i <= NF; i++)
- ref = ref $i
- if (ref in escape)
- semantic_error("Redefine escape (" ref ")")
- escape[ref] = geid
- geid++
- table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
- next
- }
-
- variant = null
- # converts
- i = 2
- while (i <= NF) {
- opcode = $(i++)
- delete opnds
- ext = null
- flags = null
- opnd = null
- # parse one opcode
- if (match($i, opnd_expr)) {
- opnd = $i
- count = split($(i++), opnds, ",")
- flags = convert_operands(count, opnds)
- }
- if (match($i, ext_expr))
- ext = $(i++)
- if (match($i, sep_expr))
- i++
- else if (i < NF)
- semantic_error($i " is not a separator")
-
- # check if group opcode
- if (match(opcode, group_expr)) {
- if (!(opcode in group)) {
- group[opcode] = ggid
- ggid++
- }
- flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
- }
- # check force(or default) 64bit
- if (match(ext, force64_expr))
- flags = add_flags(flags, "INAT_FORCE64")
-
- # check REX prefix
- if (match(opcode, rex_expr))
- flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
-
- # check coprocessor escape : TODO
- if (match(opcode, fpu_expr))
- flags = add_flags(flags, "INAT_MODRM")
-
- # check VEX codes
- if (match(ext, evexonly_expr))
- flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
- else if (match(ext, vexonly_expr))
- flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
- else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
- flags = add_flags(flags, "INAT_VEXOK")
-
- # check prefixes
- if (match(ext, prefix_expr)) {
- if (!prefix_num[opcode])
- semantic_error("Unknown prefix: " opcode)
- flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
- }
- if (length(flags) == 0)
- continue
- # check if last prefix
- if (match(ext, lprefix1_expr)) {
- lptable1[idx] = add_flags(lptable1[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (match(ext, lprefix2_expr)) {
- lptable2[idx] = add_flags(lptable2[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (match(ext, lprefix3_expr)) {
- lptable3[idx] = add_flags(lptable3[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (!match(ext, lprefix_expr)){
- table[idx] = add_flags(table[idx],flags)
- }
- }
- if (variant)
- table[idx] = add_flags(table[idx],variant)
-}
-
-END {
- if (awkchecked != "")
- exit 1
- # print escape opcode map's array
- print "/* Escape opcode map array */"
- print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < geid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (etable[i,j])
- print " ["i"]["j"] = "etable[i,j]","
- print "};\n"
- # print group opcode map's array
- print "/* Group opcode map array */"
- print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < ggid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (gtable[i,j])
- print " ["i"]["j"] = "gtable[i,j]","
- print "};\n"
- # print AVX opcode map's array
- print "/* AVX opcode map array */"
- print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < gaid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (atable[i,j])
- print " ["i"]["j"] = "atable[i,j]","
- print "};"
-}
-
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/insn/inat.c
deleted file mode 100644
index e4bf28e6f4c7..000000000000
--- a/tools/objtool/arch/x86/insn/inat.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * x86 instruction attribute tables
- *
- * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "insn.h"
-
-/* Attribute tables are generated from opcode map */
-#include "inat-tables.c"
-
-/* Attribute search APIs */
-insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
-{
- return inat_primary_table[opcode];
-}
-
-int inat_get_last_prefix_id(insn_byte_t last_pfx)
-{
- insn_attr_t lpfx_attr;
-
- lpfx_attr = inat_get_opcode_attribute(last_pfx);
- return inat_last_prefix_id(lpfx_attr);
-}
-
-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
- insn_attr_t esc_attr)
-{
- const insn_attr_t *table;
- int n;
-
- n = inat_escape_id(esc_attr);
-
- table = inat_escape_tables[n][0];
- if (!table)
- return 0;
- if (inat_has_variant(table[opcode]) && lpfx_id) {
- table = inat_escape_tables[n][lpfx_id];
- if (!table)
- return 0;
- }
- return table[opcode];
-}
-
-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
- insn_attr_t grp_attr)
-{
- const insn_attr_t *table;
- int n;
-
- n = inat_group_id(grp_attr);
-
- table = inat_group_tables[n][0];
- if (!table)
- return inat_group_common_attribute(grp_attr);
- if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
- table = inat_group_tables[n][lpfx_id];
- if (!table)
- return inat_group_common_attribute(grp_attr);
- }
- return table[X86_MODRM_REG(modrm)] |
- inat_group_common_attribute(grp_attr);
-}
-
-insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
- insn_byte_t vex_p)
-{
- const insn_attr_t *table;
- if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
- return 0;
- /* At first, this checks the master table */
- table = inat_avx_tables[vex_m][0];
- if (!table)
- return 0;
- if (!inat_is_group(table[opcode]) && vex_p) {
- /* If this is not a group, get attribute directly */
- table = inat_avx_tables[vex_m][vex_p];
- if (!table)
- return 0;
- }
- return table[opcode];
-}
-
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/insn/inat.h
deleted file mode 100644
index 125ecd2a300d..000000000000
--- a/tools/objtool/arch/x86/insn/inat.h
+++ /dev/null
@@ -1,234 +0,0 @@
-#ifndef _ASM_X86_INAT_H
-#define _ASM_X86_INAT_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "inat_types.h"
-
-/*
- * Internal bits. Don't use bitmasks directly, because these bits are
- * unstable. You should use checking functions.
- */
-
-#define INAT_OPCODE_TABLE_SIZE 256
-#define INAT_GROUP_TABLE_SIZE 8
-
-/* Legacy last prefixes */
-#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
-#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
-#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
-/* Other Legacy prefixes */
-#define INAT_PFX_LOCK 4 /* 0xF0 */
-#define INAT_PFX_CS 5 /* 0x2E */
-#define INAT_PFX_DS 6 /* 0x3E */
-#define INAT_PFX_ES 7 /* 0x26 */
-#define INAT_PFX_FS 8 /* 0x64 */
-#define INAT_PFX_GS 9 /* 0x65 */
-#define INAT_PFX_SS 10 /* 0x36 */
-#define INAT_PFX_ADDRSZ 11 /* 0x67 */
-/* x86-64 REX prefix */
-#define INAT_PFX_REX 12 /* 0x4X */
-/* AVX VEX prefixes */
-#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
-#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
-#define INAT_PFX_EVEX 15 /* EVEX prefix */
-
-#define INAT_LSTPFX_MAX 3
-#define INAT_LGCPFX_MAX 11
-
-/* Immediate size */
-#define INAT_IMM_BYTE 1
-#define INAT_IMM_WORD 2
-#define INAT_IMM_DWORD 3
-#define INAT_IMM_QWORD 4
-#define INAT_IMM_PTR 5
-#define INAT_IMM_VWORD32 6
-#define INAT_IMM_VWORD 7
-
-/* Legacy prefix */
-#define INAT_PFX_OFFS 0
-#define INAT_PFX_BITS 4
-#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
-#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
-/* Escape opcodes */
-#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
-#define INAT_ESC_BITS 2
-#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
-#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
-/* Group opcodes (1-16) */
-#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
-#define INAT_GRP_BITS 5
-#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
-#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
-/* Immediates */
-#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
-#define INAT_IMM_BITS 3
-#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
-/* Flags */
-#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
-#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
-#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
-#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
-#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
-#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
-#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
-#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
-#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
-/* Attribute making macros for attribute tables */
-#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
-#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
-#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
-#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
-
-/* Attribute search APIs */
-extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
-extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
-extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
- int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
- int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
- insn_byte_t vex_m,
- insn_byte_t vex_pp);
-
-/* Attribute checking functions */
-static inline int inat_is_legacy_prefix(insn_attr_t attr)
-{
- attr &= INAT_PFX_MASK;
- return attr && attr <= INAT_LGCPFX_MAX;
-}
-
-static inline int inat_is_address_size_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
-}
-
-static inline int inat_is_operand_size_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
-}
-
-static inline int inat_is_rex_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
-}
-
-static inline int inat_last_prefix_id(insn_attr_t attr)
-{
- if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
- return 0;
- else
- return attr & INAT_PFX_MASK;
-}
-
-static inline int inat_is_vex_prefix(insn_attr_t attr)
-{
- attr &= INAT_PFX_MASK;
- return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
- attr == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_evex_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_vex3_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
-}
-
-static inline int inat_is_escape(insn_attr_t attr)
-{
- return attr & INAT_ESC_MASK;
-}
-
-static inline int inat_escape_id(insn_attr_t attr)
-{
- return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
-}
-
-static inline int inat_is_group(insn_attr_t attr)
-{
- return attr & INAT_GRP_MASK;
-}
-
-static inline int inat_group_id(insn_attr_t attr)
-{
- return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
-}
-
-static inline int inat_group_common_attribute(insn_attr_t attr)
-{
- return attr & ~INAT_GRP_MASK;
-}
-
-static inline int inat_has_immediate(insn_attr_t attr)
-{
- return attr & INAT_IMM_MASK;
-}
-
-static inline int inat_immediate_size(insn_attr_t attr)
-{
- return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
-}
-
-static inline int inat_has_modrm(insn_attr_t attr)
-{
- return attr & INAT_MODRM;
-}
-
-static inline int inat_is_force64(insn_attr_t attr)
-{
- return attr & INAT_FORCE64;
-}
-
-static inline int inat_has_second_immediate(insn_attr_t attr)
-{
- return attr & INAT_SCNDIMM;
-}
-
-static inline int inat_has_moffset(insn_attr_t attr)
-{
- return attr & INAT_MOFFSET;
-}
-
-static inline int inat_has_variant(insn_attr_t attr)
-{
- return attr & INAT_VARIANT;
-}
-
-static inline int inat_accept_vex(insn_attr_t attr)
-{
- return attr & INAT_VEXOK;
-}
-
-static inline int inat_must_vex(insn_attr_t attr)
-{
- return attr & (INAT_VEXONLY | INAT_EVEXONLY);
-}
-
-static inline int inat_must_evex(insn_attr_t attr)
-{
- return attr & INAT_EVEXONLY;
-}
-#endif
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/insn/inat_types.h
deleted file mode 100644
index cb3c20ce39cf..000000000000
--- a/tools/objtool/arch/x86/insn/inat_types.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_X86_INAT_TYPES_H
-#define _ASM_X86_INAT_TYPES_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-
-/* Instruction attributes */
-typedef unsigned int insn_attr_t;
-typedef unsigned char insn_byte_t;
-typedef signed int insn_value_t;
-
-#endif
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/insn/insn.c
deleted file mode 100644
index ca983e2bea8b..000000000000
--- a/tools/objtool/arch/x86/insn/insn.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004, 2009
- */
-
-#ifdef __KERNEL__
-#include <linux/string.h>
-#else
-#include <string.h>
-#endif
-#include "inat.h"
-#include "insn.h"
-
-/* Verify next sizeof(t) bytes can be on the same instruction */
-#define validate_next(t, insn, n) \
- ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
-
-#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
-
-#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); r; })
-
-#define get_next(t, insn) \
- ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
-
-#define peek_nbyte_next(t, insn, n) \
- ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
-
-#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
-
-/**
- * insn_init() - initialize struct insn
- * @insn: &struct insn to be initialized
- * @kaddr: address (in kernel memory) of instruction (or copy thereof)
- * @x86_64: !0 for 64-bit kernel or 64-bit app
- */
-void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
-{
- /*
- * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
- * even if the input buffer is long enough to hold them.
- */
- if (buf_len > MAX_INSN_SIZE)
- buf_len = MAX_INSN_SIZE;
-
- memset(insn, 0, sizeof(*insn));
- insn->kaddr = kaddr;
- insn->end_kaddr = kaddr + buf_len;
- insn->next_byte = kaddr;
- insn->x86_64 = x86_64 ? 1 : 0;
- insn->opnd_bytes = 4;
- if (x86_64)
- insn->addr_bytes = 8;
- else
- insn->addr_bytes = 4;
-}
-
-/**
- * insn_get_prefixes - scan x86 instruction prefix bytes
- * @insn: &struct insn containing instruction
- *
- * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
- * to point to the (first) opcode. No effect if @insn->prefixes.got
- * is already set.
- */
-void insn_get_prefixes(struct insn *insn)
-{
- struct insn_field *prefixes = &insn->prefixes;
- insn_attr_t attr;
- insn_byte_t b, lb;
- int i, nb;
-
- if (prefixes->got)
- return;
-
- nb = 0;
- lb = 0;
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- while (inat_is_legacy_prefix(attr)) {
- /* Skip if same prefix */
- for (i = 0; i < nb; i++)
- if (prefixes->bytes[i] == b)
- goto found;
- if (nb == 4)
- /* Invalid instruction */
- break;
- prefixes->bytes[nb++] = b;
- if (inat_is_address_size_prefix(attr)) {
- /* address size switches 2/4 or 4/8 */
- if (insn->x86_64)
- insn->addr_bytes ^= 12;
- else
- insn->addr_bytes ^= 6;
- } else if (inat_is_operand_size_prefix(attr)) {
- /* oprand size switches 2/4 */
- insn->opnd_bytes ^= 6;
- }
-found:
- prefixes->nbytes++;
- insn->next_byte++;
- lb = b;
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- }
- /* Set the last prefix */
- if (lb && lb != insn->prefixes.bytes[3]) {
- if (unlikely(insn->prefixes.bytes[3])) {
- /* Swap the last prefix */
- b = insn->prefixes.bytes[3];
- for (i = 0; i < nb; i++)
- if (prefixes->bytes[i] == lb)
- prefixes->bytes[i] = b;
- }
- insn->prefixes.bytes[3] = lb;
- }
-
- /* Decode REX prefix */
- if (insn->x86_64) {
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_rex_prefix(attr)) {
- insn->rex_prefix.value = b;
- insn->rex_prefix.nbytes = 1;
- insn->next_byte++;
- if (X86_REX_W(b))
- /* REX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- }
- }
- insn->rex_prefix.got = 1;
-
- /* Decode VEX prefix */
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_vex_prefix(attr)) {
- insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
- if (!insn->x86_64) {
- /*
- * In 32-bits mode, if the [7:6] bits (mod bits of
- * ModRM) on the second byte are not 11b, it is
- * LDS or LES or BOUND.
- */
- if (X86_MODRM_MOD(b2) != 3)
- goto vex_end;
- }
- insn->vex_prefix.bytes[0] = b;
- insn->vex_prefix.bytes[1] = b2;
- if (inat_is_evex_prefix(attr)) {
- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
- b2 = peek_nbyte_next(insn_byte_t, insn, 3);
- insn->vex_prefix.bytes[3] = b2;
- insn->vex_prefix.nbytes = 4;
- insn->next_byte += 4;
- if (insn->x86_64 && X86_VEX_W(b2))
- /* VEX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- } else if (inat_is_vex3_prefix(attr)) {
- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
- insn->vex_prefix.nbytes = 3;
- insn->next_byte += 3;
- if (insn->x86_64 && X86_VEX_W(b2))
- /* VEX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- } else {
- /*
- * For VEX2, fake VEX3-like byte#2.
- * Makes it easier to decode vex.W, vex.vvvv,
- * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
- */
- insn->vex_prefix.bytes[2] = b2 & 0x7f;
- insn->vex_prefix.nbytes = 2;
- insn->next_byte += 2;
- }
- }
-vex_end:
- insn->vex_prefix.got = 1;
-
- prefixes->got = 1;
-
-err_out:
- return;
-}
-
-/**
- * insn_get_opcode - collect opcode(s)
- * @insn: &struct insn containing instruction
- *
- * Populates @insn->opcode, updates @insn->next_byte to point past the
- * opcode byte(s), and set @insn->attr (except for groups).
- * If necessary, first collects any preceding (prefix) bytes.
- * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
- * is already 1.
- */
-void insn_get_opcode(struct insn *insn)
-{
- struct insn_field *opcode = &insn->opcode;
- insn_byte_t op;
- int pfx_id;
- if (opcode->got)
- return;
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
-
- /* Get first opcode */
- op = get_next(insn_byte_t, insn);
- opcode->bytes[0] = op;
- opcode->nbytes = 1;
-
- /* Check if there is VEX prefix or not */
- if (insn_is_avx(insn)) {
- insn_byte_t m, p;
- m = insn_vex_m_bits(insn);
- p = insn_vex_p_bits(insn);
- insn->attr = inat_get_avx_attribute(op, m, p);
- if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
- (!inat_accept_vex(insn->attr) &&
- !inat_is_group(insn->attr)))
- insn->attr = 0; /* This instruction is bad */
- goto end; /* VEX has only 1 byte for opcode */
- }
-
- insn->attr = inat_get_opcode_attribute(op);
- while (inat_is_escape(insn->attr)) {
- /* Get escaped opcode */
- op = get_next(insn_byte_t, insn);
- opcode->bytes[opcode->nbytes++] = op;
- pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
- }
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
-end:
- opcode->got = 1;
-
-err_out:
- return;
-}
-
-/**
- * insn_get_modrm - collect ModRM byte, if any
- * @insn: &struct insn containing instruction
- *
- * Populates @insn->modrm and updates @insn->next_byte to point past the
- * ModRM byte, if any. If necessary, first collects the preceding bytes
- * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
- */
-void insn_get_modrm(struct insn *insn)
-{
- struct insn_field *modrm = &insn->modrm;
- insn_byte_t pfx_id, mod;
- if (modrm->got)
- return;
- if (!insn->opcode.got)
- insn_get_opcode(insn);
-
- if (inat_has_modrm(insn->attr)) {
- mod = get_next(insn_byte_t, insn);
- modrm->value = mod;
- modrm->nbytes = 1;
- if (inat_is_group(insn->attr)) {
- pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_group_attribute(mod, pfx_id,
- insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
- }
- }
-
- if (insn->x86_64 && inat_is_force64(insn->attr))
- insn->opnd_bytes = 8;
- modrm->got = 1;
-
-err_out:
- return;
-}
-
-
-/**
- * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte. No effect if @insn->x86_64 is 0.
- */
-int insn_rip_relative(struct insn *insn)
-{
- struct insn_field *modrm = &insn->modrm;
-
- if (!insn->x86_64)
- return 0;
- if (!modrm->got)
- insn_get_modrm(insn);
- /*
- * For rip-relative instructions, the mod field (top 2 bits)
- * is zero and the r/m field (bottom 3 bits) is 0x5.
- */
- return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
-}
-
-/**
- * insn_get_sib() - Get the SIB byte of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte.
- */
-void insn_get_sib(struct insn *insn)
-{
- insn_byte_t modrm;
-
- if (insn->sib.got)
- return;
- if (!insn->modrm.got)
- insn_get_modrm(insn);
- if (insn->modrm.nbytes) {
- modrm = (insn_byte_t)insn->modrm.value;
- if (insn->addr_bytes != 2 &&
- X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
- insn->sib.value = get_next(insn_byte_t, insn);
- insn->sib.nbytes = 1;
- }
- }
- insn->sib.got = 1;
-
-err_out:
- return;
-}
-
-
-/**
- * insn_get_displacement() - Get the displacement of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * SIB byte.
- * Displacement value is sign-expanded.
- */
-void insn_get_displacement(struct insn *insn)
-{
- insn_byte_t mod, rm, base;
-
- if (insn->displacement.got)
- return;
- if (!insn->sib.got)
- insn_get_sib(insn);
- if (insn->modrm.nbytes) {
- /*
- * Interpreting the modrm byte:
- * mod = 00 - no displacement fields (exceptions below)
- * mod = 01 - 1-byte displacement field
- * mod = 10 - displacement field is 4 bytes, or 2 bytes if
- * address size = 2 (0x67 prefix in 32-bit mode)
- * mod = 11 - no memory operand
- *
- * If address size = 2...
- * mod = 00, r/m = 110 - displacement field is 2 bytes
- *
- * If address size != 2...
- * mod != 11, r/m = 100 - SIB byte exists
- * mod = 00, SIB base = 101 - displacement field is 4 bytes
- * mod = 00, r/m = 101 - rip-relative addressing, displacement
- * field is 4 bytes
- */
- mod = X86_MODRM_MOD(insn->modrm.value);
- rm = X86_MODRM_RM(insn->modrm.value);
- base = X86_SIB_BASE(insn->sib.value);
- if (mod == 3)
- goto out;
- if (mod == 1) {
- insn->displacement.value = get_next(signed char, insn);
- insn->displacement.nbytes = 1;
- } else if (insn->addr_bytes == 2) {
- if ((mod == 0 && rm == 6) || mod == 2) {
- insn->displacement.value =
- get_next(short, insn);
- insn->displacement.nbytes = 2;
- }
- } else {
- if ((mod == 0 && rm == 5) || mod == 2 ||
- (mod == 0 && base == 5)) {
- insn->displacement.value = get_next(int, insn);
- insn->displacement.nbytes = 4;
- }
- }
- }
-out:
- insn->displacement.got = 1;
-
-err_out:
- return;
-}
-
-/* Decode moffset16/32/64. Return 0 if failed */
-static int __get_moffset(struct insn *insn)
-{
- switch (insn->addr_bytes) {
- case 2:
- insn->moffset1.value = get_next(short, insn);
- insn->moffset1.nbytes = 2;
- break;
- case 4:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- break;
- case 8:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- insn->moffset2.value = get_next(int, insn);
- insn->moffset2.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->moffset1.got = insn->moffset2.got = 1;
-
- return 1;
-
-err_out:
- return 0;
-}
-
-/* Decode imm v32(Iz). Return 0 if failed */
-static int __get_immv32(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
- break;
- case 4:
- case 8:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
-
- return 1;
-
-err_out:
- return 0;
-}
-
-/* Decode imm v64(Iv/Ov), Return 0 if failed */
-static int __get_immv(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
- break;
- case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- break;
- case 8:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->immediate1.got = insn->immediate2.got = 1;
-
- return 1;
-err_out:
- return 0;
-}
-
-/* Decode ptr16:16/32(Ap) */
-static int __get_immptr(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
- break;
- case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- break;
- case 8:
- /* ptr16:64 is not exist (no segment) */
- return 0;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->immediate2.value = get_next(unsigned short, insn);
- insn->immediate2.nbytes = 2;
- insn->immediate1.got = insn->immediate2.got = 1;
-
- return 1;
-err_out:
- return 0;
-}
-
-/**
- * insn_get_immediate() - Get the immediates of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * displacement bytes.
- * Basically, most of immediates are sign-expanded. Unsigned-value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
- */
-void insn_get_immediate(struct insn *insn)
-{
- if (insn->immediate.got)
- return;
- if (!insn->displacement.got)
- insn_get_displacement(insn);
-
- if (inat_has_moffset(insn->attr)) {
- if (!__get_moffset(insn))
- goto err_out;
- goto done;
- }
-
- if (!inat_has_immediate(insn->attr))
- /* no immediates */
- goto done;
-
- switch (inat_immediate_size(insn->attr)) {
- case INAT_IMM_BYTE:
- insn->immediate.value = get_next(signed char, insn);
- insn->immediate.nbytes = 1;
- break;
- case INAT_IMM_WORD:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
- break;
- case INAT_IMM_DWORD:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
- break;
- case INAT_IMM_QWORD:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
- break;
- case INAT_IMM_PTR:
- if (!__get_immptr(insn))
- goto err_out;
- break;
- case INAT_IMM_VWORD32:
- if (!__get_immv32(insn))
- goto err_out;
- break;
- case INAT_IMM_VWORD:
- if (!__get_immv(insn))
- goto err_out;
- break;
- default:
- /* Here, insn must have an immediate, but failed */
- goto err_out;
- }
- if (inat_has_second_immediate(insn->attr)) {
- insn->immediate2.value = get_next(signed char, insn);
- insn->immediate2.nbytes = 1;
- }
-done:
- insn->immediate.got = 1;
-
-err_out:
- return;
-}
-
-/**
- * insn_get_length() - Get the length of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * immediates bytes.
- */
-void insn_get_length(struct insn *insn)
-{
- if (insn->length)
- return;
- if (!insn->immediate.got)
- insn_get_immediate(insn);
- insn->length = (unsigned char)((unsigned long)insn->next_byte
- - (unsigned long)insn->kaddr);
-}
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/insn/insn.h
deleted file mode 100644
index e23578c7b1be..000000000000
--- a/tools/objtool/arch/x86/insn/insn.h
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef _ASM_X86_INSN_H
-#define _ASM_X86_INSN_H
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2009
- */
-
-/* insn_attr_t is defined in inat.h */
-#include "inat.h"
-
-struct insn_field {
- union {
- insn_value_t value;
- insn_byte_t bytes[4];
- };
- /* !0 if we've run insn_get_xxx() for this field */
- unsigned char got;
- unsigned char nbytes;
-};
-
-struct insn {
- struct insn_field prefixes; /*
- * Prefixes
- * prefixes.bytes[3]: last prefix
- */
- struct insn_field rex_prefix; /* REX prefix */
- struct insn_field vex_prefix; /* VEX prefix */
- struct insn_field opcode; /*
- * opcode.bytes[0]: opcode1
- * opcode.bytes[1]: opcode2
- * opcode.bytes[2]: opcode3
- */
- struct insn_field modrm;
- struct insn_field sib;
- struct insn_field displacement;
- union {
- struct insn_field immediate;
- struct insn_field moffset1; /* for 64bit MOV */
- struct insn_field immediate1; /* for 64bit imm or off16/32 */
- };
- union {
- struct insn_field moffset2; /* for 64bit MOV */
- struct insn_field immediate2; /* for 64bit imm or seg16 */
- };
-
- insn_attr_t attr;
- unsigned char opnd_bytes;
- unsigned char addr_bytes;
- unsigned char length;
- unsigned char x86_64;
-
- const insn_byte_t *kaddr; /* kernel address of insn to analyze */
- const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
- const insn_byte_t *next_byte;
-};
-
-#define MAX_INSN_SIZE 15
-
-#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
-#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
-#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
-
-#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
-#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
-#define X86_SIB_BASE(sib) ((sib) & 0x07)
-
-#define X86_REX_W(rex) ((rex) & 8)
-#define X86_REX_R(rex) ((rex) & 4)
-#define X86_REX_X(rex) ((rex) & 2)
-#define X86_REX_B(rex) ((rex) & 1)
-
-/* VEX bit flags */
-#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
-#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
-#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
-#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
-#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
-/* VEX bit fields */
-#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
-#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
-#define X86_VEX2_M 1 /* VEX2.M always 1 */
-#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
-
-extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
-
-/* Attribute will be determined after getting ModRM (for opcode groups) */
-static inline void insn_get_attribute(struct insn *insn)
-{
- insn_get_modrm(insn);
-}
-
-/* Instruction uses RIP-relative addressing */
-extern int insn_rip_relative(struct insn *insn);
-
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
- const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
- insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
- insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
-static inline int insn_is_avx(struct insn *insn)
-{
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
- return (insn->vex_prefix.value != 0);
-}
-
-static inline int insn_is_evex(struct insn *insn)
-{
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
- return (insn->vex_prefix.nbytes == 4);
-}
-
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
- insn->displacement.got && insn->immediate.got;
-}
-
-static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
-{
- if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
- return X86_VEX2_M;
- else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
- return X86_VEX3_M(insn->vex_prefix.bytes[1]);
- else /* EVEX */
- return X86_EVEX_M(insn->vex_prefix.bytes[1]);
-}
-
-static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
-{
- if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
- return X86_VEX_P(insn->vex_prefix.bytes[1]);
- else
- return X86_VEX_P(insn->vex_prefix.bytes[2]);
-}
-
-/* Get the last prefix id from last prefix or VEX prefix */
-static inline int insn_last_prefix_id(struct insn *insn)
-{
- if (insn_is_avx(insn))
- return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
-
- if (insn->prefixes.bytes[3])
- return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
-
- return 0;
-}
-
-/* Offset of each field from kaddr */
-static inline int insn_offset_rex_prefix(struct insn *insn)
-{
- return insn->prefixes.nbytes;
-}
-static inline int insn_offset_vex_prefix(struct insn *insn)
-{
- return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
-}
-static inline int insn_offset_opcode(struct insn *insn)
-{
- return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
-}
-static inline int insn_offset_modrm(struct insn *insn)
-{
- return insn_offset_opcode(insn) + insn->opcode.nbytes;
-}
-static inline int insn_offset_sib(struct insn *insn)
-{
- return insn_offset_modrm(insn) + insn->modrm.nbytes;
-}
-static inline int insn_offset_displacement(struct insn *insn)
-{
- return insn_offset_sib(insn) + insn->sib.nbytes;
-}
-static inline int insn_offset_immediate(struct insn *insn)
-{
- return insn_offset_displacement(insn) + insn->displacement.nbytes;
-}
-
-#endif /* _ASM_X86_INSN_H */
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
deleted file mode 100644
index 1754e094bc28..000000000000
--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+++ /dev/null
@@ -1,1063 +0,0 @@
-# x86 Opcode Maps
-#
-# This is (mostly) based on following documentations.
-# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
-# (#326018-047US, June 2013)
-#
-#<Opcode maps>
-# Table: table-name
-# Referrer: escaped-name
-# AVXcode: avx-code
-# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# (or)
-# opcode: escape # escaped-name
-# EndTable
-#
-# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
-# mnemonics that begin with lowercase 'k' accept a VEX prefix
-#
-#<group maps>
-# GrpTable: GrpXXX
-# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# EndTable
-#
-# AVX Superscripts
-# (ev): this opcode requires EVEX prefix.
-# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
-# (v): this opcode requires VEX prefix.
-# (v1): this opcode only supports 128bit VEX.
-#
-# Last Prefix Superscripts
-# - (66): the last prefix is 0x66
-# - (F3): the last prefix is 0xF3
-# - (F2): the last prefix is 0xF2
-# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
-# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
-
-Table: one byte opcode
-Referrer:
-AVXcode:
-# 0x00 - 0x0f
-00: ADD Eb,Gb
-01: ADD Ev,Gv
-02: ADD Gb,Eb
-03: ADD Gv,Ev
-04: ADD AL,Ib
-05: ADD rAX,Iz
-06: PUSH ES (i64)
-07: POP ES (i64)
-08: OR Eb,Gb
-09: OR Ev,Gv
-0a: OR Gb,Eb
-0b: OR Gv,Ev
-0c: OR AL,Ib
-0d: OR rAX,Iz
-0e: PUSH CS (i64)
-0f: escape # 2-byte escape
-# 0x10 - 0x1f
-10: ADC Eb,Gb
-11: ADC Ev,Gv
-12: ADC Gb,Eb
-13: ADC Gv,Ev
-14: ADC AL,Ib
-15: ADC rAX,Iz
-16: PUSH SS (i64)
-17: POP SS (i64)
-18: SBB Eb,Gb
-19: SBB Ev,Gv
-1a: SBB Gb,Eb
-1b: SBB Gv,Ev
-1c: SBB AL,Ib
-1d: SBB rAX,Iz
-1e: PUSH DS (i64)
-1f: POP DS (i64)
-# 0x20 - 0x2f
-20: AND Eb,Gb
-21: AND Ev,Gv
-22: AND Gb,Eb
-23: AND Gv,Ev
-24: AND AL,Ib
-25: AND rAx,Iz
-26: SEG=ES (Prefix)
-27: DAA (i64)
-28: SUB Eb,Gb
-29: SUB Ev,Gv
-2a: SUB Gb,Eb
-2b: SUB Gv,Ev
-2c: SUB AL,Ib
-2d: SUB rAX,Iz
-2e: SEG=CS (Prefix)
-2f: DAS (i64)
-# 0x30 - 0x3f
-30: XOR Eb,Gb
-31: XOR Ev,Gv
-32: XOR Gb,Eb
-33: XOR Gv,Ev
-34: XOR AL,Ib
-35: XOR rAX,Iz
-36: SEG=SS (Prefix)
-37: AAA (i64)
-38: CMP Eb,Gb
-39: CMP Ev,Gv
-3a: CMP Gb,Eb
-3b: CMP Gv,Ev
-3c: CMP AL,Ib
-3d: CMP rAX,Iz
-3e: SEG=DS (Prefix)
-3f: AAS (i64)
-# 0x40 - 0x4f
-40: INC eAX (i64) | REX (o64)
-41: INC eCX (i64) | REX.B (o64)
-42: INC eDX (i64) | REX.X (o64)
-43: INC eBX (i64) | REX.XB (o64)
-44: INC eSP (i64) | REX.R (o64)
-45: INC eBP (i64) | REX.RB (o64)
-46: INC eSI (i64) | REX.RX (o64)
-47: INC eDI (i64) | REX.RXB (o64)
-48: DEC eAX (i64) | REX.W (o64)
-49: DEC eCX (i64) | REX.WB (o64)
-4a: DEC eDX (i64) | REX.WX (o64)
-4b: DEC eBX (i64) | REX.WXB (o64)
-4c: DEC eSP (i64) | REX.WR (o64)
-4d: DEC eBP (i64) | REX.WRB (o64)
-4e: DEC eSI (i64) | REX.WRX (o64)
-4f: DEC eDI (i64) | REX.WRXB (o64)
-# 0x50 - 0x5f
-50: PUSH rAX/r8 (d64)
-51: PUSH rCX/r9 (d64)
-52: PUSH rDX/r10 (d64)
-53: PUSH rBX/r11 (d64)
-54: PUSH rSP/r12 (d64)
-55: PUSH rBP/r13 (d64)
-56: PUSH rSI/r14 (d64)
-57: PUSH rDI/r15 (d64)
-58: POP rAX/r8 (d64)
-59: POP rCX/r9 (d64)
-5a: POP rDX/r10 (d64)
-5b: POP rBX/r11 (d64)
-5c: POP rSP/r12 (d64)
-5d: POP rBP/r13 (d64)
-5e: POP rSI/r14 (d64)
-5f: POP rDI/r15 (d64)
-# 0x60 - 0x6f
-60: PUSHA/PUSHAD (i64)
-61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
-63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
-64: SEG=FS (Prefix)
-65: SEG=GS (Prefix)
-66: Operand-Size (Prefix)
-67: Address-Size (Prefix)
-68: PUSH Iz (d64)
-69: IMUL Gv,Ev,Iz
-6a: PUSH Ib (d64)
-6b: IMUL Gv,Ev,Ib
-6c: INS/INSB Yb,DX
-6d: INS/INSW/INSD Yz,DX
-6e: OUTS/OUTSB DX,Xb
-6f: OUTS/OUTSW/OUTSD DX,Xz
-# 0x70 - 0x7f
-70: JO Jb
-71: JNO Jb
-72: JB/JNAE/JC Jb
-73: JNB/JAE/JNC Jb
-74: JZ/JE Jb
-75: JNZ/JNE Jb
-76: JBE/JNA Jb
-77: JNBE/JA Jb
-78: JS Jb
-79: JNS Jb
-7a: JP/JPE Jb
-7b: JNP/JPO Jb
-7c: JL/JNGE Jb
-7d: JNL/JGE Jb
-7e: JLE/JNG Jb
-7f: JNLE/JG Jb
-# 0x80 - 0x8f
-80: Grp1 Eb,Ib (1A)
-81: Grp1 Ev,Iz (1A)
-82: Grp1 Eb,Ib (1A),(i64)
-83: Grp1 Ev,Ib (1A)
-84: TEST Eb,Gb
-85: TEST Ev,Gv
-86: XCHG Eb,Gb
-87: XCHG Ev,Gv
-88: MOV Eb,Gb
-89: MOV Ev,Gv
-8a: MOV Gb,Eb
-8b: MOV Gv,Ev
-8c: MOV Ev,Sw
-8d: LEA Gv,M
-8e: MOV Sw,Ew
-8f: Grp1A (1A) | POP Ev (d64)
-# 0x90 - 0x9f
-90: NOP | PAUSE (F3) | XCHG r8,rAX
-91: XCHG rCX/r9,rAX
-92: XCHG rDX/r10,rAX
-93: XCHG rBX/r11,rAX
-94: XCHG rSP/r12,rAX
-95: XCHG rBP/r13,rAX
-96: XCHG rSI/r14,rAX
-97: XCHG rDI/r15,rAX
-98: CBW/CWDE/CDQE
-99: CWD/CDQ/CQO
-9a: CALLF Ap (i64)
-9b: FWAIT/WAIT
-9c: PUSHF/D/Q Fv (d64)
-9d: POPF/D/Q Fv (d64)
-9e: SAHF
-9f: LAHF
-# 0xa0 - 0xaf
-a0: MOV AL,Ob
-a1: MOV rAX,Ov
-a2: MOV Ob,AL
-a3: MOV Ov,rAX
-a4: MOVS/B Yb,Xb
-a5: MOVS/W/D/Q Yv,Xv
-a6: CMPS/B Xb,Yb
-a7: CMPS/W/D Xv,Yv
-a8: TEST AL,Ib
-a9: TEST rAX,Iz
-aa: STOS/B Yb,AL
-ab: STOS/W/D/Q Yv,rAX
-ac: LODS/B AL,Xb
-ad: LODS/W/D/Q rAX,Xv
-ae: SCAS/B AL,Yb
-# Note: The May 2011 Intel manual shows Xv for the second parameter of the
-# next instruction but Yv is correct
-af: SCAS/W/D/Q rAX,Yv
-# 0xb0 - 0xbf
-b0: MOV AL/R8L,Ib
-b1: MOV CL/R9L,Ib
-b2: MOV DL/R10L,Ib
-b3: MOV BL/R11L,Ib
-b4: MOV AH/R12L,Ib
-b5: MOV CH/R13L,Ib
-b6: MOV DH/R14L,Ib
-b7: MOV BH/R15L,Ib
-b8: MOV rAX/r8,Iv
-b9: MOV rCX/r9,Iv
-ba: MOV rDX/r10,Iv
-bb: MOV rBX/r11,Iv
-bc: MOV rSP/r12,Iv
-bd: MOV rBP/r13,Iv
-be: MOV rSI/r14,Iv
-bf: MOV rDI/r15,Iv
-# 0xc0 - 0xcf
-c0: Grp2 Eb,Ib (1A)
-c1: Grp2 Ev,Ib (1A)
-c2: RETN Iw (f64)
-c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
-c6: Grp11A Eb,Ib (1A)
-c7: Grp11B Ev,Iz (1A)
-c8: ENTER Iw,Ib
-c9: LEAVE (d64)
-ca: RETF Iw
-cb: RETF
-cc: INT3
-cd: INT Ib
-ce: INTO (i64)
-cf: IRET/D/Q
-# 0xd0 - 0xdf
-d0: Grp2 Eb,1 (1A)
-d1: Grp2 Ev,1 (1A)
-d2: Grp2 Eb,CL (1A)
-d3: Grp2 Ev,CL (1A)
-d4: AAM Ib (i64)
-d5: AAD Ib (i64)
-d6:
-d7: XLAT/XLATB
-d8: ESC
-d9: ESC
-da: ESC
-db: ESC
-dc: ESC
-dd: ESC
-de: ESC
-df: ESC
-# 0xe0 - 0xef
-# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
-# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
-# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64)
-e1: LOOPE/LOOPZ Jb (f64)
-e2: LOOP Jb (f64)
-e3: JrCXZ Jb (f64)
-e4: IN AL,Ib
-e5: IN eAX,Ib
-e6: OUT Ib,AL
-e7: OUT Ib,eAX
-# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset
-# in "near" jumps and calls is 16-bit. For CALL,
-# push of return address is 16-bit wide, RSP is decremented by 2
-# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64)
-e9: JMP-near Jz (f64)
-ea: JMP-far Ap (i64)
-eb: JMP-short Jb (f64)
-ec: IN AL,DX
-ed: IN eAX,DX
-ee: OUT DX,AL
-ef: OUT DX,eAX
-# 0xf0 - 0xff
-f0: LOCK (Prefix)
-f1:
-f2: REPNE (Prefix) | XACQUIRE (Prefix)
-f3: REP/REPE (Prefix) | XRELEASE (Prefix)
-f4: HLT
-f5: CMC
-f6: Grp3_1 Eb (1A)
-f7: Grp3_2 Ev (1A)
-f8: CLC
-f9: STC
-fa: CLI
-fb: STI
-fc: CLD
-fd: STD
-fe: Grp4 (1A)
-ff: Grp5 (1A)
-EndTable
-
-Table: 2-byte opcode (0x0f)
-Referrer: 2-byte escape
-AVXcode: 1
-# 0x0f 0x00-0x0f
-00: Grp6 (1A)
-01: Grp7 (1A)
-02: LAR Gv,Ew
-03: LSL Gv,Ew
-04:
-05: SYSCALL (o64)
-06: CLTS
-07: SYSRET (o64)
-08: INVD
-09: WBINVD
-0a:
-0b: UD2 (1B)
-0c:
-# AMD's prefetch group. Intel supports prefetchw(/1) only.
-0d: GrpP
-0e: FEMMS
-# 3DNow! uses the last imm byte as opcode extension.
-0f: 3DNow! Pq,Qq,Ib
-# 0x0f 0x10-0x1f
-# NOTE: According to Intel SDM opcode map, vmovups and vmovupd has no operands
-# but it actually has operands. And also, vmovss and vmovsd only accept 128bit.
-# MOVSS/MOVSD has too many forms(3) on SDM. This map just shows a typical form.
-# Many AVX instructions lack v1 superscript, according to Intel AVX-Prgramming
-# Reference A.1
-10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
-11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
-12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
-13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
-14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
-15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
-16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
-17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
-18: Grp16 (1A)
-19:
-# Intel SDM opcode map does not list MPX instructions. For now using Gv for
-# bnd registers and Ev for everything else is OK because the instruction
-# decoder does not use the information except as an indication that there is
-# a ModR/M byte.
-1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
-1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
-1d:
-1e:
-1f: NOP Ev
-# 0x0f 0x20-0x2f
-20: MOV Rd,Cd
-21: MOV Rd,Dd
-22: MOV Cd,Rd
-23: MOV Dd,Rd
-24:
-25:
-26:
-27:
-28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
-29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
-2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
-2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
-2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
-2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
-2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
-2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
-# 0x0f 0x30-0x3f
-30: WRMSR
-31: RDTSC
-32: RDMSR
-33: RDPMC
-34: SYSENTER
-35: SYSEXIT
-36:
-37: GETSEC
-38: escape # 3-byte escape 1
-39:
-3a: escape # 3-byte escape 2
-3b:
-3c:
-3d:
-3e:
-3f:
-# 0x0f 0x40-0x4f
-40: CMOVO Gv,Ev
-41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
-42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
-43: CMOVAE/NB/NC Gv,Ev
-44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
-45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
-46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
-47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
-48: CMOVS Gv,Ev
-49: CMOVNS Gv,Ev
-4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
-4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
-4c: CMOVL/NGE Gv,Ev
-4d: CMOVNL/GE Gv,Ev
-4e: CMOVLE/NG Gv,Ev
-4f: CMOVNLE/G Gv,Ev
-# 0x0f 0x50-0x5f
-50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
-51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
-52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
-53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
-54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
-55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
-56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
-57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
-58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
-59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
-5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
-5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
-5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
-5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
-5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
-5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
-# 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
-61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
-62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
-63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
-64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
-65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
-66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
-67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
-68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
-69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
-6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
-6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
-6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
-6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
-6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
-6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
-# 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
-71: Grp12 (1A)
-72: Grp13 (1A)
-73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
-75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
-76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
-# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX.
-77: emms | vzeroupper | vzeroall
-78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
-79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
-7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
-7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
-7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
-7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
-7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
-7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
-# 0x0f 0x80-0x8f
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64)
-81: JNO Jz (f64)
-82: JB/JC/JNAE Jz (f64)
-83: JAE/JNB/JNC Jz (f64)
-84: JE/JZ Jz (f64)
-85: JNE/JNZ Jz (f64)
-86: JBE/JNA Jz (f64)
-87: JA/JNBE Jz (f64)
-88: JS Jz (f64)
-89: JNS Jz (f64)
-8a: JP/JPE Jz (f64)
-8b: JNP/JPO Jz (f64)
-8c: JL/JNGE Jz (f64)
-8d: JNL/JGE Jz (f64)
-8e: JLE/JNG Jz (f64)
-8f: JNLE/JG Jz (f64)
-# 0x0f 0x90-0x9f
-90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
-91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
-92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
-93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
-94: SETE/Z Eb
-95: SETNE/NZ Eb
-96: SETBE/NA Eb
-97: SETA/NBE Eb
-98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
-99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
-9a: SETP/PE Eb
-9b: SETNP/PO Eb
-9c: SETL/NGE Eb
-9d: SETNL/GE Eb
-9e: SETLE/NG Eb
-9f: SETNLE/G Eb
-# 0x0f 0xa0-0xaf
-a0: PUSH FS (d64)
-a1: POP FS (d64)
-a2: CPUID
-a3: BT Ev,Gv
-a4: SHLD Ev,Gv,Ib
-a5: SHLD Ev,Gv,CL
-a6: GrpPDLK
-a7: GrpRNG
-a8: PUSH GS (d64)
-a9: POP GS (d64)
-aa: RSM
-ab: BTS Ev,Gv
-ac: SHRD Ev,Gv,Ib
-ad: SHRD Ev,Gv,CL
-ae: Grp15 (1A),(1C)
-af: IMUL Gv,Ev
-# 0x0f 0xb0-0xbf
-b0: CMPXCHG Eb,Gb
-b1: CMPXCHG Ev,Gv
-b2: LSS Gv,Mp
-b3: BTR Ev,Gv
-b4: LFS Gv,Mp
-b5: LGS Gv,Mp
-b6: MOVZX Gv,Eb
-b7: MOVZX Gv,Ew
-b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
-b9: Grp10 (1A)
-ba: Grp8 Ev,Ib (1A)
-bb: BTC Ev,Gv
-bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
-be: MOVSX Gv,Eb
-bf: MOVSX Gv,Ew
-# 0x0f 0xc0-0xcf
-c0: XADD Eb,Gb
-c1: XADD Ev,Gv
-c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
-c3: movnti My,Gy
-c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
-c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
-c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
-c7: Grp9 (1A)
-c8: BSWAP RAX/EAX/R8/R8D
-c9: BSWAP RCX/ECX/R9/R9D
-ca: BSWAP RDX/EDX/R10/R10D
-cb: BSWAP RBX/EBX/R11/R11D
-cc: BSWAP RSP/ESP/R12/R12D
-cd: BSWAP RBP/EBP/R13/R13D
-ce: BSWAP RSI/ESI/R14/R14D
-cf: BSWAP RDI/EDI/R15/R15D
-# 0x0f 0xd0-0xdf
-d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
-d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
-d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
-d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
-d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
-d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
-d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
-d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
-d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
-da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
-db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
-dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
-dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
-de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
-df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
-e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
-e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
-e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
-e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
-e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
-e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
-e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
-e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
-e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
-ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
-eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
-ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
-ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
-ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
-ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xf0-0xff
-f0: vlddqu Vx,Mx (F2)
-f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
-f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
-f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
-f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
-f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
-f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
-f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
-f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
-f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
-fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
-fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
-fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
-fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
-fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
-EndTable
-
-Table: 3-byte opcode 1 (0x0f 0x38)
-Referrer: 3-byte escape 1
-AVXcode: 2
-# 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
-01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
-02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
-03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
-04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
-05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
-06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
-07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
-08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
-09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
-0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
-0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
-0c: vpermilps Vx,Hx,Wx (66),(v)
-0d: vpermilpd Vx,Hx,Wx (66),(v)
-0e: vtestps Vx,Wx (66),(v)
-0f: vtestpd Vx,Wx (66),(v)
-# 0x0f 0x38 0x10-0x1f
-10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
-11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
-12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
-13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
-14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
-15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
-16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
-17: vptest Vx,Wx (66)
-18: vbroadcastss Vx,Wd (66),(v)
-19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
-1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
-1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
-1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
-1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
-1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
-1f: vpabsq Vx,Wx (66),(ev)
-# 0x0f 0x38 0x20-0x2f
-20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
-21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
-22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
-23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
-24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
-25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
-26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
-27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
-28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
-29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
-2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
-2b: vpackusdw Vx,Hx,Wx (66),(v1)
-2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
-2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
-2e: vmaskmovps Mx,Hx,Vx (66),(v)
-2f: vmaskmovpd Mx,Hx,Vx (66),(v)
-# 0x0f 0x38 0x30-0x3f
-30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
-31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
-32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
-33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
-34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
-35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
-36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
-37: vpcmpgtq Vx,Hx,Wx (66),(v1)
-38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
-39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
-3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
-3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
-3c: vpmaxsb Vx,Hx,Wx (66),(v1)
-3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
-3e: vpmaxuw Vx,Hx,Wx (66),(v1)
-3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0x38 0x40-0x8f
-40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
-41: vphminposuw Vdq,Wdq (66),(v1)
-42: vgetexpps/d Vx,Wx (66),(ev)
-43: vgetexpss/d Vx,Hx,Wx (66),(ev)
-44: vplzcntd/q Vx,Wx (66),(ev)
-45: vpsrlvd/q Vx,Hx,Wx (66),(v)
-46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
-47: vpsllvd/q Vx,Hx,Wx (66),(v)
-# Skip 0x48-0x4b
-4c: vrcp14ps/d Vpd,Wpd (66),(ev)
-4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
-4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
-4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-# Skip 0x50-0x57
-58: vpbroadcastd Vx,Wx (66),(v)
-59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
-5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
-5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x63
-64: vpblendmd/q Vx,Hx,Wx (66),(ev)
-65: vblendmps/d Vx,Hx,Wx (66),(ev)
-66: vpblendmb/w Vx,Hx,Wx (66),(ev)
-# Skip 0x67-0x74
-75: vpermi2b/w Vx,Hx,Wx (66),(ev)
-76: vpermi2d/q Vx,Hx,Wx (66),(ev)
-77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
-78: vpbroadcastb Vx,Wx (66),(v)
-79: vpbroadcastw Vx,Wx (66),(v)
-7a: vpbroadcastb Vx,Rv (66),(ev)
-7b: vpbroadcastw Vx,Rv (66),(ev)
-7c: vpbroadcastd/q Vx,Rv (66),(ev)
-7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
-7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
-7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
-80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
-82: INVPCID Gy,Mdq (66)
-83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
-88: vexpandps/d Vpd,Wpd (66),(ev)
-89: vpexpandd/q Vx,Wx (66),(ev)
-8a: vcompressps/d Wx,Vx (66),(ev)
-8b: vpcompressd/q Wx,Vx (66),(ev)
-8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
-8d: vpermb/w Vx,Hx,Wx (66),(ev)
-8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
-# 0x0f 0x38 0x90-0xbf (FMA)
-90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
-91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
-92: vgatherdps/d Vx,Hx,Wx (66),(v)
-93: vgatherqps/d Vx,Hx,Wx (66),(v)
-94:
-95:
-96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
-97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
-98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
-99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
-9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
-9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-a0: vpscatterdd/q Wx,Vx (66),(ev)
-a1: vpscatterqd/q Wx,Vx (66),(ev)
-a2: vscatterdps/d Wx,Vx (66),(ev)
-a3: vscatterqps/d Wx,Vx (66),(ev)
-a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
-a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
-a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
-a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
-ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
-af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
-b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
-b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
-b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
-b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
-bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
-bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
-bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-# 0x0f 0x38 0xc0-0xff
-c4: vpconflictd/q Vx,Wx (66),(ev)
-c6: Grp18 (1A)
-c7: Grp19 (1A)
-c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
-c9: sha1msg1 Vdq,Wdq
-ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
-db: VAESIMC Vdq,Wdq (66),(v1)
-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
-f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
-f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
-f2: ANDN Gy,By,Ey (v)
-f3: Grp17 (1A)
-f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
-f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
-f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
-EndTable
-
-Table: 3-byte opcode 2 (0x0f 0x3a)
-Referrer: 3-byte escape 2
-AVXcode: 3
-# 0x0f 0x3a 0x00-0xff
-00: vpermq Vqq,Wqq,Ib (66),(v)
-01: vpermpd Vqq,Wqq,Ib (66),(v)
-02: vpblendd Vx,Hx,Wx,Ib (66),(v)
-03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
-04: vpermilps Vx,Wx,Ib (66),(v)
-05: vpermilpd Vx,Wx,Ib (66),(v)
-06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
-07:
-08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
-09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
-0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
-0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
-0c: vblendps Vx,Hx,Wx,Ib (66)
-0d: vblendpd Vx,Hx,Wx,Ib (66)
-0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
-0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
-14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
-15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
-16: vpextrd/q Ey,Vdq,Ib (66),(v1)
-17: vextractps Ed,Vdq,Ib (66),(v1)
-18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-1d: vcvtps2ph Wx,Vx,Ib (66),(v)
-1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
-1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
-20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
-21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
-22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
-23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
-26: vgetmantps/d Vx,Wx,Ib (66),(ev)
-27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
-30: kshiftrb/w Vk,Uk,Ib (66),(v)
-31: kshiftrd/q Vk,Uk,Ib (66),(v)
-32: kshiftlb/w Vk,Uk,Ib (66),(v)
-33: kshiftld/q Vk,Uk,Ib (66),(v)
-38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
-3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
-40: vdpps Vx,Hx,Wx,Ib (66)
-41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
-42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
-43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
-46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
-4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
-4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
-4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
-50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
-51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
-54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
-55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
-56: vreduceps/d Vx,Wx,Ib (66),(ev)
-57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
-60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
-61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
-62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
-63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
-66: vfpclassps/d Vk,Wx,Ib (66),(ev)
-67: vfpclassss/d Vk,Wx,Ib (66),(ev)
-cc: sha1rnds4 Vdq,Wdq,Ib
-df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
-f0: RORX Gy,Ey,Ib (F2),(v)
-EndTable
-
-GrpTable: Grp1
-0: ADD
-1: OR
-2: ADC
-3: SBB
-4: AND
-5: SUB
-6: XOR
-7: CMP
-EndTable
-
-GrpTable: Grp1A
-0: POP
-EndTable
-
-GrpTable: Grp2
-0: ROL
-1: ROR
-2: RCL
-3: RCR
-4: SHL/SAL
-5: SHR
-6:
-7: SAR
-EndTable
-
-GrpTable: Grp3_1
-0: TEST Eb,Ib
-1: TEST Eb,Ib
-2: NOT Eb
-3: NEG Eb
-4: MUL AL,Eb
-5: IMUL AL,Eb
-6: DIV AL,Eb
-7: IDIV AL,Eb
-EndTable
-
-GrpTable: Grp3_2
-0: TEST Ev,Iz
-1:
-2: NOT Ev
-3: NEG Ev
-4: MUL rAX,Ev
-5: IMUL rAX,Ev
-6: DIV rAX,Ev
-7: IDIV rAX,Ev
-EndTable
-
-GrpTable: Grp4
-0: INC Eb
-1: DEC Eb
-EndTable
-
-GrpTable: Grp5
-0: INC Ev
-1: DEC Ev
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-2: CALLN Ev (f64)
-3: CALLF Ep
-4: JMPN Ev (f64)
-5: JMPF Mp
-6: PUSH Ev (d64)
-7:
-EndTable
-
-GrpTable: Grp6
-0: SLDT Rv/Mw
-1: STR Rv/Mw
-2: LLDT Ew
-3: LTR Ew
-4: VERR Ew
-5: VERW Ew
-EndTable
-
-GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
-3: LIDT Ms
-4: SMSW Mw/Rv
-5: rdpkru (110),(11B) | wrpkru (111),(11B)
-6: LMSW Ew
-7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
-EndTable
-
-GrpTable: Grp8
-4: BT
-5: BTS
-6: BTR
-7: BTC
-EndTable
-
-GrpTable: Grp9
-1: CMPXCHG8B/16B Mq/Mdq
-3: xrstors
-4: xsavec
-5: xsaves
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
-7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
-EndTable
-
-GrpTable: Grp10
-EndTable
-
-# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
-GrpTable: Grp11A
-0: MOV Eb,Ib
-7: XABORT Ib (000),(11B)
-EndTable
-
-GrpTable: Grp11B
-0: MOV Eb,Iz
-7: XBEGIN Jz (000),(11B)
-EndTable
-
-GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
-4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
-6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp13
-0: vprord/q Hx,Wx,Ib (66),(ev)
-1: vprold/q Hx,Wx,Ib (66),(ev)
-2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
-4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
-6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
-3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
-6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
-7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp15
-0: fxsave | RDFSBASE Ry (F3),(11B)
-1: fxstor | RDGSBASE Ry (F3),(11B)
-2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
-3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE
-5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
-7: clflush | clflushopt (66) | sfence (11B)
-EndTable
-
-GrpTable: Grp16
-0: prefetch NTA
-1: prefetch T0
-2: prefetch T1
-3: prefetch T2
-EndTable
-
-GrpTable: Grp17
-1: BLSR By,Ey (v)
-2: BLSMSK By,Ey (v)
-3: BLSI By,Ey (v)
-EndTable
-
-GrpTable: Grp18
-1: vgatherpf0dps/d Wx (66),(ev)
-2: vgatherpf1dps/d Wx (66),(ev)
-5: vscatterpf0dps/d Wx (66),(ev)
-6: vscatterpf1dps/d Wx (66),(ev)
-EndTable
-
-GrpTable: Grp19
-1: vgatherpf0qps/d Wx (66),(ev)
-2: vgatherpf1qps/d Wx (66),(ev)
-5: vscatterpf0qps/d Wx (66),(ev)
-6: vscatterpf1qps/d Wx (66),(ev)
-EndTable
-
-# AMD's Prefetch Group
-GrpTable: GrpP
-0: PREFETCH
-1: PREFETCHW
-EndTable
-
-GrpTable: GrpPDLK
-0: MONTMUL
-1: XSHA1
-2: XSHA2
-EndTable
-
-GrpTable: GrpRNG
-0: xstore-rng
-1: xcrypt-ecb
-2: xcrypt-cbc
-4: xcrypt-cfb
-5: xcrypt-ofb
-EndTable
diff --git a/tools/objtool/arch/x86/lib/inat.c b/tools/objtool/arch/x86/lib/inat.c
new file mode 100644
index 000000000000..c1f01a8e9f65
--- /dev/null
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -0,0 +1,97 @@
+/*
+ * x86 instruction attribute tables
+ *
+ * Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/insn.h>
+
+/* Attribute tables are generated from opcode map */
+#include "inat-tables.c"
+
+/* Attribute search APIs */
+insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+{
+ return inat_primary_table[opcode];
+}
+
+int inat_get_last_prefix_id(insn_byte_t last_pfx)
+{
+ insn_attr_t lpfx_attr;
+
+ lpfx_attr = inat_get_opcode_attribute(last_pfx);
+ return inat_last_prefix_id(lpfx_attr);
+}
+
+insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
+ insn_attr_t esc_attr)
+{
+ const insn_attr_t *table;
+ int n;
+
+ n = inat_escape_id(esc_attr);
+
+ table = inat_escape_tables[n][0];
+ if (!table)
+ return 0;
+ if (inat_has_variant(table[opcode]) && lpfx_id) {
+ table = inat_escape_tables[n][lpfx_id];
+ if (!table)
+ return 0;
+ }
+ return table[opcode];
+}
+
+insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
+ insn_attr_t grp_attr)
+{
+ const insn_attr_t *table;
+ int n;
+
+ n = inat_group_id(grp_attr);
+
+ table = inat_group_tables[n][0];
+ if (!table)
+ return inat_group_common_attribute(grp_attr);
+ if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+ table = inat_group_tables[n][lpfx_id];
+ if (!table)
+ return inat_group_common_attribute(grp_attr);
+ }
+ return table[X86_MODRM_REG(modrm)] |
+ inat_group_common_attribute(grp_attr);
+}
+
+insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
+ insn_byte_t vex_p)
+{
+ const insn_attr_t *table;
+ if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
+ return 0;
+	/* First, check the master table */
+ table = inat_avx_tables[vex_m][0];
+ if (!table)
+ return 0;
+ if (!inat_is_group(table[opcode]) && vex_p) {
+ /* If this is not a group, get attribute directly */
+ table = inat_avx_tables[vex_m][vex_p];
+ if (!table)
+ return 0;
+ }
+ return table[opcode];
+}
+
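As an illustration of how these lookup helpers chain together, here is a minimal sketch (the wrapper name is invented, and the fixed lpfx_id of 0 assumes no legacy prefix was seen); it mirrors what insn_get_opcode() does in insn.c below:

#include <asm/inat.h>
#include <asm/insn.h>

/* Illustrative sketch: attribute of the two-byte opcode 0x0f <op>,
 * assuming no last legacy prefix (lpfx_id == 0). */
static insn_attr_t twobyte_attr_example(insn_byte_t op)
{
	/* primary table: 0x0f is flagged as an escape byte */
	insn_attr_t attr = inat_get_opcode_attribute(0x0f);

	if (inat_is_escape(attr))
		/* second-level lookup in the matching escape table */
		attr = inat_get_escape_attribute(op, 0, attr);

	return attr;
}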
diff --git a/tools/objtool/arch/x86/lib/insn.c b/tools/objtool/arch/x86/lib/insn.c
new file mode 100644
index 000000000000..1088eb8f3a5f
--- /dev/null
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -0,0 +1,606 @@
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004, 2009
+ */
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+#include <asm/inat.h>
+#include <asm/insn.h>
+
+/* Verify that the next sizeof(t) bytes belong to the same instruction */
+#define validate_next(t, insn, n) \
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+
+#define __get_next(t, insn) \
+ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+
+#define __peek_nbyte_next(t, insn, n) \
+ ({ t r = *(t*)((insn)->next_byte + n); r; })
+
+#define get_next(t, insn) \
+ ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+
+#define peek_nbyte_next(t, insn, n) \
+ ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
+
+#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
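+
+/*
+ * Note: get_next()/peek_next()/peek_nbyte_next() expand to a 'goto err_out'
+ * when the read would run past end_kaddr, so every caller below provides a
+ * local err_out label.
+ */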
+
+/**
+ * insn_init() - initialize struct insn
+ * @insn: &struct insn to be initialized
+ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the instruction buffer at @kaddr
+ * @x86_64: !0 for 64-bit kernel or 64-bit app
+ */
+void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+{
+ /*
+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+ * even if the input buffer is long enough to hold them.
+ */
+ if (buf_len > MAX_INSN_SIZE)
+ buf_len = MAX_INSN_SIZE;
+
+ memset(insn, 0, sizeof(*insn));
+ insn->kaddr = kaddr;
+ insn->end_kaddr = kaddr + buf_len;
+ insn->next_byte = kaddr;
+ insn->x86_64 = x86_64 ? 1 : 0;
+ insn->opnd_bytes = 4;
+ if (x86_64)
+ insn->addr_bytes = 8;
+ else
+ insn->addr_bytes = 4;
+}
+
+/**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn: &struct insn containing instruction
+ *
+ * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
+ * to point to the (first) opcode. No effect if @insn->prefixes.got
+ * is already set.
+ */
+void insn_get_prefixes(struct insn *insn)
+{
+ struct insn_field *prefixes = &insn->prefixes;
+ insn_attr_t attr;
+ insn_byte_t b, lb;
+ int i, nb;
+
+ if (prefixes->got)
+ return;
+
+ nb = 0;
+ lb = 0;
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ while (inat_is_legacy_prefix(attr)) {
+ /* Skip if same prefix */
+ for (i = 0; i < nb; i++)
+ if (prefixes->bytes[i] == b)
+ goto found;
+ if (nb == 4)
+ /* Invalid instruction */
+ break;
+ prefixes->bytes[nb++] = b;
+ if (inat_is_address_size_prefix(attr)) {
+ /* address size switches 2/4 or 4/8 */
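+ /* e.g. 8 ^ 12 == 4 and 4 ^ 12 == 8; 4 ^ 6 == 2 and 2 ^ 6 == 4 */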
+ if (insn->x86_64)
+ insn->addr_bytes ^= 12;
+ else
+ insn->addr_bytes ^= 6;
+ } else if (inat_is_operand_size_prefix(attr)) {
+ /* operand size switches 2/4 */
+ insn->opnd_bytes ^= 6;
+ }
+found:
+ prefixes->nbytes++;
+ insn->next_byte++;
+ lb = b;
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ }
+ /* Set the last prefix */
+ if (lb && lb != insn->prefixes.bytes[3]) {
+ if (unlikely(insn->prefixes.bytes[3])) {
+ /* Swap the last prefix */
+ b = insn->prefixes.bytes[3];
+ for (i = 0; i < nb; i++)
+ if (prefixes->bytes[i] == lb)
+ prefixes->bytes[i] = b;
+ }
+ insn->prefixes.bytes[3] = lb;
+ }
+
+ /* Decode REX prefix */
+ if (insn->x86_64) {
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ if (inat_is_rex_prefix(attr)) {
+ insn->rex_prefix.value = b;
+ insn->rex_prefix.nbytes = 1;
+ insn->next_byte++;
+ if (X86_REX_W(b))
+ /* REX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ }
+ }
+ insn->rex_prefix.got = 1;
+
+ /* Decode VEX prefix */
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ if (inat_is_vex_prefix(attr)) {
+ insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
+ if (!insn->x86_64) {
+ /*
+ * In 32-bit mode, if the [7:6] bits (mod bits of
+ * ModRM) on the second byte are not 11b, it is
+ * LDS or LES or BOUND.
+ */
+ if (X86_MODRM_MOD(b2) != 3)
+ goto vex_end;
+ }
+ insn->vex_prefix.bytes[0] = b;
+ insn->vex_prefix.bytes[1] = b2;
+ if (inat_is_evex_prefix(attr)) {
+ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+ insn->vex_prefix.bytes[2] = b2;
+ b2 = peek_nbyte_next(insn_byte_t, insn, 3);
+ insn->vex_prefix.bytes[3] = b2;
+ insn->vex_prefix.nbytes = 4;
+ insn->next_byte += 4;
+ if (insn->x86_64 && X86_VEX_W(b2))
+ /* VEX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ } else if (inat_is_vex3_prefix(attr)) {
+ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+ insn->vex_prefix.bytes[2] = b2;
+ insn->vex_prefix.nbytes = 3;
+ insn->next_byte += 3;
+ if (insn->x86_64 && X86_VEX_W(b2))
+ /* VEX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ } else {
+ /*
+ * For VEX2, fake VEX3-like byte#2.
+ * Makes it easier to decode vex.W, vex.vvvv,
+ * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+ */
+ insn->vex_prefix.bytes[2] = b2 & 0x7f;
+ insn->vex_prefix.nbytes = 2;
+ insn->next_byte += 2;
+ }
+ }
+vex_end:
+ insn->vex_prefix.got = 1;
+
+ prefixes->got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_opcode - collect opcode(s)
+ * @insn: &struct insn containing instruction
+ *
+ * Populates @insn->opcode, updates @insn->next_byte to point past the
+ * opcode byte(s), and sets @insn->attr (except for groups).
+ * If necessary, first collects any preceding (prefix) bytes.
+ * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
+ * is already 1.
+ */
+void insn_get_opcode(struct insn *insn)
+{
+ struct insn_field *opcode = &insn->opcode;
+ insn_byte_t op;
+ int pfx_id;
+ if (opcode->got)
+ return;
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+
+ /* Get first opcode */
+ op = get_next(insn_byte_t, insn);
+ opcode->bytes[0] = op;
+ opcode->nbytes = 1;
+
+ /* Check if there is VEX prefix or not */
+ if (insn_is_avx(insn)) {
+ insn_byte_t m, p;
+ m = insn_vex_m_bits(insn);
+ p = insn_vex_p_bits(insn);
+ insn->attr = inat_get_avx_attribute(op, m, p);
+ if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
+ (!inat_accept_vex(insn->attr) &&
+ !inat_is_group(insn->attr)))
+ insn->attr = 0; /* This instruction is bad */
+ goto end; /* VEX has only 1 byte for opcode */
+ }
+
+ insn->attr = inat_get_opcode_attribute(op);
+ while (inat_is_escape(insn->attr)) {
+ /* Get escaped opcode */
+ op = get_next(insn_byte_t, insn);
+ opcode->bytes[opcode->nbytes++] = op;
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+ }
+ if (inat_must_vex(insn->attr))
+ insn->attr = 0; /* This instruction is bad */
+end:
+ opcode->got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_modrm - collect ModRM byte, if any
+ * @insn: &struct insn containing instruction
+ *
+ * Populates @insn->modrm and updates @insn->next_byte to point past the
+ * ModRM byte, if any. If necessary, first collects the preceding bytes
+ * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+ */
+void insn_get_modrm(struct insn *insn)
+{
+ struct insn_field *modrm = &insn->modrm;
+ insn_byte_t pfx_id, mod;
+ if (modrm->got)
+ return;
+ if (!insn->opcode.got)
+ insn_get_opcode(insn);
+
+ if (inat_has_modrm(insn->attr)) {
+ mod = get_next(insn_byte_t, insn);
+ modrm->value = mod;
+ modrm->nbytes = 1;
+ if (inat_is_group(insn->attr)) {
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_group_attribute(mod, pfx_id,
+ insn->attr);
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+ insn->attr = 0; /* This is bad */
+ }
+ }
+
+ if (insn->x86_64 && inat_is_force64(insn->attr))
+ insn->opnd_bytes = 8;
+ modrm->got = 1;
+
+err_out:
+ return;
+}
+
+
+/**
+ * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte. No effect if @insn->x86_64 is 0.
+ */
+int insn_rip_relative(struct insn *insn)
+{
+ struct insn_field *modrm = &insn->modrm;
+
+ if (!insn->x86_64)
+ return 0;
+ if (!modrm->got)
+ insn_get_modrm(insn);
+ /*
+ * For rip-relative instructions, the mod field (top 2 bits)
+ * is zero and the r/m field (bottom 3 bits) is 0x5.
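+ * The 0xc7 mask keeps exactly those two fields and drops the reg field.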
+ */
+ return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+}
+
+/**
+ * insn_get_sib() - Get the SIB byte of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.
+ */
+void insn_get_sib(struct insn *insn)
+{
+ insn_byte_t modrm;
+
+ if (insn->sib.got)
+ return;
+ if (!insn->modrm.got)
+ insn_get_modrm(insn);
+ if (insn->modrm.nbytes) {
+ modrm = (insn_byte_t)insn->modrm.value;
+ if (insn->addr_bytes != 2 &&
+ X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
+ insn->sib.value = get_next(insn_byte_t, insn);
+ insn->sib.nbytes = 1;
+ }
+ }
+ insn->sib.got = 1;
+
+err_out:
+ return;
+}
+
+
+/**
+ * insn_get_displacement() - Get the displacement of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * SIB byte.
+ * Displacement value is sign-expanded.
+ */
+void insn_get_displacement(struct insn *insn)
+{
+ insn_byte_t mod, rm, base;
+
+ if (insn->displacement.got)
+ return;
+ if (!insn->sib.got)
+ insn_get_sib(insn);
+ if (insn->modrm.nbytes) {
+ /*
+ * Interpreting the modrm byte:
+ * mod = 00 - no displacement fields (exceptions below)
+ * mod = 01 - 1-byte displacement field
+ * mod = 10 - displacement field is 4 bytes, or 2 bytes if
+ * address size = 2 (0x67 prefix in 32-bit mode)
+ * mod = 11 - no memory operand
+ *
+ * If address size = 2...
+ * mod = 00, r/m = 110 - displacement field is 2 bytes
+ *
+ * If address size != 2...
+ * mod != 11, r/m = 100 - SIB byte exists
+ * mod = 00, SIB base = 101 - displacement field is 4 bytes
+ * mod = 00, r/m = 101 - rip-relative addressing, displacement
+ * field is 4 bytes
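+ *
+ * Illustrative example: with addr_bytes != 2, a ModRM byte of 0x05 has
+ * mod = 00 and r/m = 101, so (in 64-bit mode) the operand is rip-relative
+ * and a 4-byte displacement follows.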
+ */
+ mod = X86_MODRM_MOD(insn->modrm.value);
+ rm = X86_MODRM_RM(insn->modrm.value);
+ base = X86_SIB_BASE(insn->sib.value);
+ if (mod == 3)
+ goto out;
+ if (mod == 1) {
+ insn->displacement.value = get_next(signed char, insn);
+ insn->displacement.nbytes = 1;
+ } else if (insn->addr_bytes == 2) {
+ if ((mod == 0 && rm == 6) || mod == 2) {
+ insn->displacement.value =
+ get_next(short, insn);
+ insn->displacement.nbytes = 2;
+ }
+ } else {
+ if ((mod == 0 && rm == 5) || mod == 2 ||
+ (mod == 0 && base == 5)) {
+ insn->displacement.value = get_next(int, insn);
+ insn->displacement.nbytes = 4;
+ }
+ }
+ }
+out:
+ insn->displacement.got = 1;
+
+err_out:
+ return;
+}
+
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
+{
+ switch (insn->addr_bytes) {
+ case 2:
+ insn->moffset1.value = get_next(short, insn);
+ insn->moffset1.nbytes = 2;
+ break;
+ case 4:
+ insn->moffset1.value = get_next(int, insn);
+ insn->moffset1.nbytes = 4;
+ break;
+ case 8:
+ insn->moffset1.value = get_next(int, insn);
+ insn->moffset1.nbytes = 4;
+ insn->moffset2.value = get_next(int, insn);
+ insn->moffset2.nbytes = 4;
+ break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->moffset1.got = insn->moffset2.got = 1;
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate.value = get_next(short, insn);
+ insn->immediate.nbytes = 2;
+ break;
+ case 4:
+ case 8:
+ insn->immediate.value = get_next(int, insn);
+ insn->immediate.nbytes = 4;
+ break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+/* Decode imm v64(Iv/Ov). Return 0 if failed */
+static int __get_immv(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate1.value = get_next(short, insn);
+ insn->immediate1.nbytes = 2;
+ break;
+ case 4:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ break;
+ case 8:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ insn->immediate2.value = get_next(int, insn);
+ insn->immediate2.nbytes = 4;
+ break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->immediate1.got = insn->immediate2.got = 1;
+
+ return 1;
+err_out:
+ return 0;
+}
+
+/* Decode ptr16:16/32(Ap) */
+static int __get_immptr(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate1.value = get_next(short, insn);
+ insn->immediate1.nbytes = 2;
+ break;
+ case 4:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ break;
+ case 8:
+ /* ptr16:64 does not exist (no segment) */
+ return 0;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->immediate2.value = get_next(unsigned short, insn);
+ insn->immediate2.nbytes = 2;
+ insn->immediate1.got = insn->immediate2.got = 1;
+
+ return 1;
+err_out:
+ return 0;
+}
+
+/**
+ * insn_get_immediate() - Get the immediates of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * displacement bytes.
+ * Most immediates are sign-expanded. The unsigned value can be obtained
+ * by masking with ((1 << (nbytes * 8)) - 1).
+ */
+void insn_get_immediate(struct insn *insn)
+{
+ if (insn->immediate.got)
+ return;
+ if (!insn->displacement.got)
+ insn_get_displacement(insn);
+
+ if (inat_has_moffset(insn->attr)) {
+ if (!__get_moffset(insn))
+ goto err_out;
+ goto done;
+ }
+
+ if (!inat_has_immediate(insn->attr))
+ /* no immediates */
+ goto done;
+
+ switch (inat_immediate_size(insn->attr)) {
+ case INAT_IMM_BYTE:
+ insn->immediate.value = get_next(signed char, insn);
+ insn->immediate.nbytes = 1;
+ break;
+ case INAT_IMM_WORD:
+ insn->immediate.value = get_next(short, insn);
+ insn->immediate.nbytes = 2;
+ break;
+ case INAT_IMM_DWORD:
+ insn->immediate.value = get_next(int, insn);
+ insn->immediate.nbytes = 4;
+ break;
+ case INAT_IMM_QWORD:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ insn->immediate2.value = get_next(int, insn);
+ insn->immediate2.nbytes = 4;
+ break;
+ case INAT_IMM_PTR:
+ if (!__get_immptr(insn))
+ goto err_out;
+ break;
+ case INAT_IMM_VWORD32:
+ if (!__get_immv32(insn))
+ goto err_out;
+ break;
+ case INAT_IMM_VWORD:
+ if (!__get_immv(insn))
+ goto err_out;
+ break;
+ default:
+ /* Here, insn must have an immediate, but failed */
+ goto err_out;
+ }
+ if (inat_has_second_immediate(insn->attr)) {
+ insn->immediate2.value = get_next(signed char, insn);
+ insn->immediate2.nbytes = 1;
+ }
+done:
+ insn->immediate.got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_length() - Get the length of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * immediate bytes.
+ */
+void insn_get_length(struct insn *insn)
+{
+ if (insn->length)
+ return;
+ if (!insn->immediate.got)
+ insn_get_immediate(insn);
+ insn->length = (unsigned char)((unsigned long)insn->next_byte
+ - (unsigned long)insn->kaddr);
+}
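Putting the pieces above together, a minimal usage sketch (the function and buffer names are invented for illustration, and error handling is reduced to checking the reported length):

#include <asm/insn.h>

/* Illustrative only: decode one instruction from 'buf' and report how many
 * bytes it occupies. */
static int decode_one_insn_example(const unsigned char *buf, int buf_len)
{
	struct insn insn;

	insn_init(&insn, buf, buf_len, 1 /* x86_64 */);
	insn_get_length(&insn);	/* pulls in prefixes, opcode, ModRM, SIB, ... */

	if (!insn.length)
		return -1;	/* not even the first byte could be read */

	return insn.length;	/* bytes consumed by the decoded instruction */
}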
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
new file mode 100644
index 000000000000..1754e094bc28
--- /dev/null
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -0,0 +1,1063 @@
+# x86 Opcode Maps
+#
+# This is (mostly) based on the following documentation.
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+# (#326018-047US, June 2013)
+#
+#<Opcode maps>
+# Table: table-name
+# Referrer: escaped-name
+# AVXcode: avx-code
+# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# (or)
+# opcode: escape # escaped-name
+# EndTable
+#
+# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
+# mnemonics that begin with lowercase 'k' accept a VEX prefix
+#
+#<group maps>
+# GrpTable: GrpXXX
+# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# EndTable
+#
+# AVX Superscripts
+# (ev): this opcode requires EVEX prefix.
+# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
+# (v): this opcode requires VEX prefix.
+# (v1): this opcode only supports 128bit VEX.
+#
+# Last Prefix Superscripts
+# - (66): the last prefix is 0x66
+# - (F3): the last prefix is 0xF3
+# - (F2): the last prefix is 0xF2
+# - (!F3): the last prefix is not 0xF3 (including non-last prefix case)
+# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
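+#
+# Annotated example (refers to an entry in the 2-byte (0x0f) table below):
+#   58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss ... (F3),(v1) | ...
+# means opcode 0x0f 0x58 decodes as vaddps with no relevant last prefix, as
+# vaddpd when the last prefix is 0x66, and as vaddss/vaddsd with 0xF3/0xF2,
+# the (v1) superscript marking the 128-bit-only VEX forms.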
+
+Table: one byte opcode
+Referrer:
+AVXcode:
+# 0x00 - 0x0f
+00: ADD Eb,Gb
+01: ADD Ev,Gv
+02: ADD Gb,Eb
+03: ADD Gv,Ev
+04: ADD AL,Ib
+05: ADD rAX,Iz
+06: PUSH ES (i64)
+07: POP ES (i64)
+08: OR Eb,Gb
+09: OR Ev,Gv
+0a: OR Gb,Eb
+0b: OR Gv,Ev
+0c: OR AL,Ib
+0d: OR rAX,Iz
+0e: PUSH CS (i64)
+0f: escape # 2-byte escape
+# 0x10 - 0x1f
+10: ADC Eb,Gb
+11: ADC Ev,Gv
+12: ADC Gb,Eb
+13: ADC Gv,Ev
+14: ADC AL,Ib
+15: ADC rAX,Iz
+16: PUSH SS (i64)
+17: POP SS (i64)
+18: SBB Eb,Gb
+19: SBB Ev,Gv
+1a: SBB Gb,Eb
+1b: SBB Gv,Ev
+1c: SBB AL,Ib
+1d: SBB rAX,Iz
+1e: PUSH DS (i64)
+1f: POP DS (i64)
+# 0x20 - 0x2f
+20: AND Eb,Gb
+21: AND Ev,Gv
+22: AND Gb,Eb
+23: AND Gv,Ev
+24: AND AL,Ib
+25: AND rAx,Iz
+26: SEG=ES (Prefix)
+27: DAA (i64)
+28: SUB Eb,Gb
+29: SUB Ev,Gv
+2a: SUB Gb,Eb
+2b: SUB Gv,Ev
+2c: SUB AL,Ib
+2d: SUB rAX,Iz
+2e: SEG=CS (Prefix)
+2f: DAS (i64)
+# 0x30 - 0x3f
+30: XOR Eb,Gb
+31: XOR Ev,Gv
+32: XOR Gb,Eb
+33: XOR Gv,Ev
+34: XOR AL,Ib
+35: XOR rAX,Iz
+36: SEG=SS (Prefix)
+37: AAA (i64)
+38: CMP Eb,Gb
+39: CMP Ev,Gv
+3a: CMP Gb,Eb
+3b: CMP Gv,Ev
+3c: CMP AL,Ib
+3d: CMP rAX,Iz
+3e: SEG=DS (Prefix)
+3f: AAS (i64)
+# 0x40 - 0x4f
+40: INC eAX (i64) | REX (o64)
+41: INC eCX (i64) | REX.B (o64)
+42: INC eDX (i64) | REX.X (o64)
+43: INC eBX (i64) | REX.XB (o64)
+44: INC eSP (i64) | REX.R (o64)
+45: INC eBP (i64) | REX.RB (o64)
+46: INC eSI (i64) | REX.RX (o64)
+47: INC eDI (i64) | REX.RXB (o64)
+48: DEC eAX (i64) | REX.W (o64)
+49: DEC eCX (i64) | REX.WB (o64)
+4a: DEC eDX (i64) | REX.WX (o64)
+4b: DEC eBX (i64) | REX.WXB (o64)
+4c: DEC eSP (i64) | REX.WR (o64)
+4d: DEC eBP (i64) | REX.WRB (o64)
+4e: DEC eSI (i64) | REX.WRX (o64)
+4f: DEC eDI (i64) | REX.WRXB (o64)
+# 0x50 - 0x5f
+50: PUSH rAX/r8 (d64)
+51: PUSH rCX/r9 (d64)
+52: PUSH rDX/r10 (d64)
+53: PUSH rBX/r11 (d64)
+54: PUSH rSP/r12 (d64)
+55: PUSH rBP/r13 (d64)
+56: PUSH rSI/r14 (d64)
+57: PUSH rDI/r15 (d64)
+58: POP rAX/r8 (d64)
+59: POP rCX/r9 (d64)
+5a: POP rDX/r10 (d64)
+5b: POP rBX/r11 (d64)
+5c: POP rSP/r12 (d64)
+5d: POP rBP/r13 (d64)
+5e: POP rSI/r14 (d64)
+5f: POP rDI/r15 (d64)
+# 0x60 - 0x6f
+60: PUSHA/PUSHAD (i64)
+61: POPA/POPAD (i64)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
+64: SEG=FS (Prefix)
+65: SEG=GS (Prefix)
+66: Operand-Size (Prefix)
+67: Address-Size (Prefix)
+68: PUSH Iz (d64)
+69: IMUL Gv,Ev,Iz
+6a: PUSH Ib (d64)
+6b: IMUL Gv,Ev,Ib
+6c: INS/INSB Yb,DX
+6d: INS/INSW/INSD Yz,DX
+6e: OUTS/OUTSB DX,Xb
+6f: OUTS/OUTSW/OUTSD DX,Xz
+# 0x70 - 0x7f
+70: JO Jb
+71: JNO Jb
+72: JB/JNAE/JC Jb
+73: JNB/JAE/JNC Jb
+74: JZ/JE Jb
+75: JNZ/JNE Jb
+76: JBE/JNA Jb
+77: JNBE/JA Jb
+78: JS Jb
+79: JNS Jb
+7a: JP/JPE Jb
+7b: JNP/JPO Jb
+7c: JL/JNGE Jb
+7d: JNL/JGE Jb
+7e: JLE/JNG Jb
+7f: JNLE/JG Jb
+# 0x80 - 0x8f
+80: Grp1 Eb,Ib (1A)
+81: Grp1 Ev,Iz (1A)
+82: Grp1 Eb,Ib (1A),(i64)
+83: Grp1 Ev,Ib (1A)
+84: TEST Eb,Gb
+85: TEST Ev,Gv
+86: XCHG Eb,Gb
+87: XCHG Ev,Gv
+88: MOV Eb,Gb
+89: MOV Ev,Gv
+8a: MOV Gb,Eb
+8b: MOV Gv,Ev
+8c: MOV Ev,Sw
+8d: LEA Gv,M
+8e: MOV Sw,Ew
+8f: Grp1A (1A) | POP Ev (d64)
+# 0x90 - 0x9f
+90: NOP | PAUSE (F3) | XCHG r8,rAX
+91: XCHG rCX/r9,rAX
+92: XCHG rDX/r10,rAX
+93: XCHG rBX/r11,rAX
+94: XCHG rSP/r12,rAX
+95: XCHG rBP/r13,rAX
+96: XCHG rSI/r14,rAX
+97: XCHG rDI/r15,rAX
+98: CBW/CWDE/CDQE
+99: CWD/CDQ/CQO
+9a: CALLF Ap (i64)
+9b: FWAIT/WAIT
+9c: PUSHF/D/Q Fv (d64)
+9d: POPF/D/Q Fv (d64)
+9e: SAHF
+9f: LAHF
+# 0xa0 - 0xaf
+a0: MOV AL,Ob
+a1: MOV rAX,Ov
+a2: MOV Ob,AL
+a3: MOV Ov,rAX
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
+a6: CMPS/B Xb,Yb
+a7: CMPS/W/D Xv,Yv
+a8: TEST AL,Ib
+a9: TEST rAX,Iz
+aa: STOS/B Yb,AL
+ab: STOS/W/D/Q Yv,rAX
+ac: LODS/B AL,Xb
+ad: LODS/W/D/Q rAX,Xv
+ae: SCAS/B AL,Yb
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
+# 0xb0 - 0xbf
+b0: MOV AL/R8L,Ib
+b1: MOV CL/R9L,Ib
+b2: MOV DL/R10L,Ib
+b3: MOV BL/R11L,Ib
+b4: MOV AH/R12L,Ib
+b5: MOV CH/R13L,Ib
+b6: MOV DH/R14L,Ib
+b7: MOV BH/R15L,Ib
+b8: MOV rAX/r8,Iv
+b9: MOV rCX/r9,Iv
+ba: MOV rDX/r10,Iv
+bb: MOV rBX/r11,Iv
+bc: MOV rSP/r12,Iv
+bd: MOV rBP/r13,Iv
+be: MOV rSI/r14,Iv
+bf: MOV rDI/r15,Iv
+# 0xc0 - 0xcf
+c0: Grp2 Eb,Ib (1A)
+c1: Grp2 Ev,Ib (1A)
+c2: RETN Iw (f64)
+c3: RETN
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c6: Grp11A Eb,Ib (1A)
+c7: Grp11B Ev,Iz (1A)
+c8: ENTER Iw,Ib
+c9: LEAVE (d64)
+ca: RETF Iw
+cb: RETF
+cc: INT3
+cd: INT Ib
+ce: INTO (i64)
+cf: IRET/D/Q
+# 0xd0 - 0xdf
+d0: Grp2 Eb,1 (1A)
+d1: Grp2 Ev,1 (1A)
+d2: Grp2 Eb,CL (1A)
+d3: Grp2 Ev,CL (1A)
+d4: AAM Ib (i64)
+d5: AAD Ib (i64)
+d6:
+d7: XLAT/XLATB
+d8: ESC
+d9: ESC
+da: ESC
+db: ESC
+dc: ESC
+dd: ESC
+de: ESC
+df: ESC
+# 0xe0 - 0xef
+# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
+# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+e0: LOOPNE/LOOPNZ Jb (f64)
+e1: LOOPE/LOOPZ Jb (f64)
+e2: LOOP Jb (f64)
+e3: JrCXZ Jb (f64)
+e4: IN AL,Ib
+e5: IN eAX,Ib
+e6: OUT Ib,AL
+e7: OUT Ib,eAX
+# With a 0x66 prefix in 64-bit mode, for AMD CPUs the immediate offset
+# in "near" jumps and calls is 16-bit. For CALL,
+# the push of the return address is 16 bits wide; RSP is decremented by 2
+# but, unlike RIP, is not truncated to 16 bits.
+e8: CALL Jz (f64)
+e9: JMP-near Jz (f64)
+ea: JMP-far Ap (i64)
+eb: JMP-short Jb (f64)
+ec: IN AL,DX
+ed: IN eAX,DX
+ee: OUT DX,AL
+ef: OUT DX,eAX
+# 0xf0 - 0xff
+f0: LOCK (Prefix)
+f1:
+f2: REPNE (Prefix) | XACQUIRE (Prefix)
+f3: REP/REPE (Prefix) | XRELEASE (Prefix)
+f4: HLT
+f5: CMC
+f6: Grp3_1 Eb (1A)
+f7: Grp3_2 Ev (1A)
+f8: CLC
+f9: STC
+fa: CLI
+fb: STI
+fc: CLD
+fd: STD
+fe: Grp4 (1A)
+ff: Grp5 (1A)
+EndTable
+
+Table: 2-byte opcode (0x0f)
+Referrer: 2-byte escape
+AVXcode: 1
+# 0x0f 0x00-0x0f
+00: Grp6 (1A)
+01: Grp7 (1A)
+02: LAR Gv,Ew
+03: LSL Gv,Ew
+04:
+05: SYSCALL (o64)
+06: CLTS
+07: SYSRET (o64)
+08: INVD
+09: WBINVD
+0a:
+0b: UD2 (1B)
+0c:
+# AMD's prefetch group. Intel supports prefetchw(/1) only.
+0d: GrpP
+0e: FEMMS
+# 3DNow! uses the last imm byte as opcode extension.
+0f: 3DNow! Pq,Qq,Ib
+# 0x0f 0x10-0x1f
+# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
+# operands, but they actually do. Also, vmovss and vmovsd only accept 128-bit
+# operands. MOVSS/MOVSD has too many forms (3) in the SDM; this map just shows
+# a typical form. Many AVX instructions lack the v1 superscript, according to
+# the Intel AVX Programming Reference A.1
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
+18: Grp16 (1A)
+19:
+# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+# bnd registers and Ev for everything else is OK because the instruction
+# decoder does not use the information except as an indication that there is
+# a ModR/M byte.
+1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+1c:
+1d:
+1e:
+1f: NOP Ev
+# 0x0f 0x20-0x2f
+20: MOV Rd,Cd
+21: MOV Rd,Dd
+22: MOV Cd,Rd
+23: MOV Dd,Rd
+24:
+25:
+26:
+27:
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
+# 0x0f 0x30-0x3f
+30: WRMSR
+31: RDTSC
+32: RDMSR
+33: RDPMC
+34: SYSENTER
+35: SYSEXIT
+36:
+37: GETSEC
+38: escape # 3-byte escape 1
+39:
+3a: escape # 3-byte escape 2
+3b:
+3c:
+3d:
+3e:
+3f:
+# 0x0f 0x40-0x4f
+40: CMOVO Gv,Ev
+41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
+42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
+43: CMOVAE/NB/NC Gv,Ev
+44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
+45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
+46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
+47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
+48: CMOVS Gv,Ev
+49: CMOVNS Gv,Ev
+4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
+4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
+4c: CMOVL/NGE Gv,Ev
+4d: CMOVNL/GE Gv,Ev
+4e: CMOVLE/NG Gv,Ev
+4f: CMOVNLE/G Gv,Ev
+# 0x0f 0x50-0x5f
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
+# 0x0f 0x60-0x6f
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
+# 0x0f 0x70-0x7f
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
+71: Grp12 (1A)
+72: Grp13 (1A)
+73: Grp14 (1A)
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
+79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
+7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
+7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+# 0x0f 0x80-0x8f
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+80: JO Jz (f64)
+81: JNO Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
+86: JBE/JNA Jz (f64)
+87: JA/JNBE Jz (f64)
+88: JS Jz (f64)
+89: JNS Jz (f64)
+8a: JP/JPE Jz (f64)
+8b: JNP/JPO Jz (f64)
+8c: JL/JNGE Jz (f64)
+8d: JNL/JGE Jz (f64)
+8e: JLE/JNG Jz (f64)
+8f: JNLE/JG Jz (f64)
+# 0x0f 0x90-0x9f
+90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
+93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
+94: SETE/Z Eb
+95: SETNE/NZ Eb
+96: SETBE/NA Eb
+97: SETA/NBE Eb
+98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
+99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
+9a: SETP/PE Eb
+9b: SETNP/PO Eb
+9c: SETL/NGE Eb
+9d: SETNL/GE Eb
+9e: SETLE/NG Eb
+9f: SETNLE/G Eb
+# 0x0f 0xa0-0xaf
+a0: PUSH FS (d64)
+a1: POP FS (d64)
+a2: CPUID
+a3: BT Ev,Gv
+a4: SHLD Ev,Gv,Ib
+a5: SHLD Ev,Gv,CL
+a6: GrpPDLK
+a7: GrpRNG
+a8: PUSH GS (d64)
+a9: POP GS (d64)
+aa: RSM
+ab: BTS Ev,Gv
+ac: SHRD Ev,Gv,Ib
+ad: SHRD Ev,Gv,CL
+ae: Grp15 (1A),(1C)
+af: IMUL Gv,Ev
+# 0x0f 0xb0-0xbf
+b0: CMPXCHG Eb,Gb
+b1: CMPXCHG Ev,Gv
+b2: LSS Gv,Mp
+b3: BTR Ev,Gv
+b4: LFS Gv,Mp
+b5: LGS Gv,Mp
+b6: MOVZX Gv,Eb
+b7: MOVZX Gv,Ew
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
+b9: Grp10 (1A)
+ba: Grp8 Ev,Ib (1A)
+bb: BTC Ev,Gv
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
+be: MOVSX Gv,Eb
+bf: MOVSX Gv,Ew
+# 0x0f 0xc0-0xcf
+c0: XADD Eb,Gb
+c1: XADD Ev,Gv
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
+c7: Grp9 (1A)
+c8: BSWAP RAX/EAX/R8/R8D
+c9: BSWAP RCX/ECX/R9/R9D
+ca: BSWAP RDX/EDX/R10/R10D
+cb: BSWAP RBX/EBX/R11/R11D
+cc: BSWAP RSP/ESP/R12/R12D
+cd: BSWAP RBP/EBP/R13/R13D
+ce: BSWAP RSI/ESI/R14/R14D
+cf: BSWAP RDI/EDI/R15/R15D
+# 0x0f 0xd0-0xdf
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xe0-0xef
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xf0-0xff
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
+ff:
+EndTable
+
+Table: 3-byte opcode 1 (0x0f 0x38)
+Referrer: 3-byte escape 1
+AVXcode: 2
+# 0x0f 0x38 0x00-0x0f
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
+# 0x0f 0x38 0x10-0x1f
+10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
+11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
+12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
+13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
+14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
+15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
+16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
+1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
+1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
+1f: vpabsq Vx,Wx (66),(ev)
+# 0x0f 0x38 0x20-0x2f
+20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
+21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
+24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
+26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
+27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
+28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
+2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
+# 0x0f 0x38 0x30-0x3f
+30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
+31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
+34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
+36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
+39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
+3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
+3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0x38 0x40-0x8f
+40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42: vgetexpps/d Vx,Wx (66),(ev)
+43: vgetexpss/d Vx,Hx,Wx (66),(ev)
+44: vplzcntd/q Vx,Wx (66),(ev)
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x4b
+4c: vrcp14ps/d Vpd,Wpd (66),(ev)
+4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+# Skip 0x50-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
+5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
+5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
+# Skip 0x5c-0x63
+64: vpblendmd/q Vx,Hx,Wx (66),(ev)
+65: vblendmps/d Vx,Hx,Wx (66),(ev)
+66: vpblendmb/w Vx,Hx,Wx (66),(ev)
+# Skip 0x67-0x74
+75: vpermi2b/w Vx,Hx,Wx (66),(ev)
+76: vpermi2d/q Vx,Hx,Wx (66),(ev)
+77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+7a: vpbroadcastb Vx,Rv (66),(ev)
+7b: vpbroadcastw Vx,Rv (66),(ev)
+7c: vpbroadcastd/q Vx,Rv (66),(ev)
+7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
+7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
+7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
+80: INVEPT Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
+88: vexpandps/d Vpd,Wpd (66),(ev)
+89: vpexpandd/q Vx,Wx (66),(ev)
+8a: vcompressps/d Wx,Vx (66),(ev)
+8b: vpcompressd/q Wx,Vx (66),(ev)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8d: vpermb/w Vx,Hx,Wx (66),(ev)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+# 0x0f 0x38 0x90-0xbf (FMA)
+90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
+91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a0: vpscatterdd/q Wx,Vx (66),(ev)
+a1: vpscatterqd/q Wx,Vx (66),(ev)
+a2: vscatterdps/d Wx,Vx (66),(ev)
+a3: vscatterqps/d Wx,Vx (66),(ev)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
+b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+# 0x0f 0x38 0xc0-0xff
+c4: vpconflictd/q Vx,Wx (66),(ev)
+c6: Grp18 (1A)
+c7: Grp19 (1A)
+c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
+c9: sha1msg1 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+EndTable
+
+Table: 3-byte opcode 2 (0x0f 0x3a)
+Referrer: 3-byte escape 2
+AVXcode: 3
+# 0x0f 0x3a 0x00-0xff
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
+0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
+1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
+26: vgetmantps/d Vx,Wx,Ib (66),(ev)
+27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+30: kshiftrb/w Vk,Uk,Ib (66),(v)
+31: kshiftrd/q Vk,Uk,Ib (66),(v)
+32: kshiftlb/w Vk,Uk,Ib (66),(v)
+33: kshiftld/q Vk,Uk,Ib (66),(v)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
+3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
+43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
+51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
+54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
+55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
+56: vreduceps/d Vx,Wx,Ib (66),(ev)
+57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+66: vfpclassps/d Vk,Wx,Ib (66),(ev)
+67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+cc: sha1rnds4 Vdq,Wdq,Ib
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
+EndTable
+
+GrpTable: Grp1
+0: ADD
+1: OR
+2: ADC
+3: SBB
+4: AND
+5: SUB
+6: XOR
+7: CMP
+EndTable
+
+GrpTable: Grp1A
+0: POP
+EndTable
+
+GrpTable: Grp2
+0: ROL
+1: ROR
+2: RCL
+3: RCR
+4: SHL/SAL
+5: SHR
+6:
+7: SAR
+EndTable
+
+GrpTable: Grp3_1
+0: TEST Eb,Ib
+1: TEST Eb,Ib
+2: NOT Eb
+3: NEG Eb
+4: MUL AL,Eb
+5: IMUL AL,Eb
+6: DIV AL,Eb
+7: IDIV AL,Eb
+EndTable
+
+GrpTable: Grp3_2
+0: TEST Ev,Iz
+1:
+2: NOT Ev
+3: NEG Ev
+4: MUL rAX,Ev
+5: IMUL rAX,Ev
+6: DIV rAX,Ev
+7: IDIV rAX,Ev
+EndTable
+
+GrpTable: Grp4
+0: INC Eb
+1: DEC Eb
+EndTable
+
+GrpTable: Grp5
+0: INC Ev
+1: DEC Ev
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+2: CALLN Ev (f64)
+3: CALLF Ep
+4: JMPN Ev (f64)
+5: JMPF Mp
+6: PUSH Ev (d64)
+7:
+EndTable
+
+GrpTable: Grp6
+0: SLDT Rv/Mw
+1: STR Rv/Mw
+2: LLDT Ew
+3: LTR Ew
+4: VERR Ew
+5: VERW Ew
+EndTable
+
+GrpTable: Grp7
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+3: LIDT Ms
+4: SMSW Mw/Rv
+5: rdpkru (110),(11B) | wrpkru (111),(11B)
+6: LMSW Ew
+7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
+EndTable
+
+GrpTable: Grp8
+4: BT
+5: BTS
+6: BTR
+7: BTC
+EndTable
+
+GrpTable: Grp9
+1: CMPXCHG8B/16B Mq/Mdq
+3: xrstors
+4: xsavec
+5: xsaves
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
+EndTable
+
+GrpTable: Grp10
+EndTable
+
+# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+GrpTable: Grp11A
+0: MOV Eb,Ib
+7: XABORT Ib (000),(11B)
+EndTable
+
+GrpTable: Grp11B
+0: MOV Eb,Iz
+7: XBEGIN Jz (000),(11B)
+EndTable
+
+GrpTable: Grp12
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp13
+0: vprord/q Hx,Wx,Ib (66),(ev)
+1: vprold/q Hx,Wx,Ib (66),(ev)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp14
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp15
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+4: XSAVE
+5: XRSTOR | lfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B)
+7: clflush | clflushopt (66) | sfence (11B)
+EndTable
+
+GrpTable: Grp16
+0: prefetch NTA
+1: prefetch T0
+2: prefetch T1
+3: prefetch T2
+EndTable
+
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
+GrpTable: Grp18
+1: vgatherpf0dps/d Wx (66),(ev)
+2: vgatherpf1dps/d Wx (66),(ev)
+5: vscatterpf0dps/d Wx (66),(ev)
+6: vscatterpf1dps/d Wx (66),(ev)
+EndTable
+
+GrpTable: Grp19
+1: vgatherpf0qps/d Wx (66),(ev)
+2: vgatherpf1qps/d Wx (66),(ev)
+5: vscatterpf0qps/d Wx (66),(ev)
+6: vscatterpf1qps/d Wx (66),(ev)
+EndTable
+
+# AMD's Prefetch Group
+GrpTable: GrpP
+0: PREFETCH
+1: PREFETCHW
+EndTable
+
+GrpTable: GrpPDLK
+0: MONTMUL
+1: XSHA1
+2: XSHA2
+EndTable
+
+GrpTable: GrpRNG
+0: xstore-rng
+1: xcrypt-ecb
+2: xcrypt-cbc
+4: xcrypt-cfb
+5: xcrypt-ofb
+EndTable
diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
new file mode 100644
index 000000000000..a3d2c62fd805
--- /dev/null
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
@@ -0,0 +1,392 @@
+#!/bin/awk -f
+# gen-insn-attr-x86.awk: Instruction attribute table generator
+# Written by Masami Hiramatsu <mhiramat@xxxxxxxxxx>
+#
+# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
+
+# Awk implementation sanity check
+function check_awk_implement() {
+ if (sprintf("%x", 0) != "0")
+ return "Your awk has a printf-format problem."
+ return ""
+}
+
+# Clear working vars
+function clear_vars() {
+ delete table
+ delete lptable2
+ delete lptable1
+ delete lptable3
+ eid = -1 # escape id
+ gid = -1 # group id
+ aid = -1 # AVX id
+ tname = ""
+}
+
+BEGIN {
+ # Implementation error checking
+ awkchecked = check_awk_implement()
+ if (awkchecked != "") {
+ print "Error: " awkchecked > "/dev/stderr"
+ print "Please try to use gawk." > "/dev/stderr"
+ exit 1
+ }
+
+ # Setup generating tables
+ print "/* x86 opcode map generated from x86-opcode-map.txt */"
+ print "/* Do not change this code. */\n"
+ ggid = 1
+ geid = 1
+ gaid = 0
+ delete etable
+ delete gtable
+ delete atable
+
+ opnd_expr = "^[A-Za-z/]"
+ ext_expr = "^\\("
+ sep_expr = "^\\|$"
+ group_expr = "^Grp[0-9A-Za-z]+"
+
+ imm_expr = "^[IJAOL][a-z]"
+ imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+ imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+ imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
+ imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
+ imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
+ imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
+ imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+ imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+ imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
+ imm_flag["Ob"] = "INAT_MOFFSET"
+ imm_flag["Ov"] = "INAT_MOFFSET"
+ imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+
+ modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
+ force64_expr = "\\([df]64\\)"
+ rex_expr = "^REX(\\.[XRWB]+)*"
+ fpu_expr = "^ESC" # TODO
+
+ lprefix1_expr = "\\((66|!F3)\\)"
+ lprefix2_expr = "\\(F3\\)"
+ lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix_expr = "\\((66|F2|F3)\\)"
+ max_lprefix = 4
+
+ # All opcodes starting with lower-case 'v', 'k' or with a (v1) superscript
+ # accept the VEX prefix
+ vexok_opcode_expr = "^[vk].*"
+ vexok_expr = "\\(v1\\)"
+ # All opcodes with a (v) superscript support *only* the VEX prefix
+ vexonly_expr = "\\(v\\)"
+ # All opcodes with an (ev) superscript support *only* the EVEX prefix
+ evexonly_expr = "\\(ev\\)"
+
+ prefix_expr = "\\(Prefix\\)"
+ prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
+ prefix_num["REPNE"] = "INAT_PFX_REPNE"
+ prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+ prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+ prefix_num["XRELEASE"] = "INAT_PFX_REPE"
+ prefix_num["LOCK"] = "INAT_PFX_LOCK"
+ prefix_num["SEG=CS"] = "INAT_PFX_CS"
+ prefix_num["SEG=DS"] = "INAT_PFX_DS"
+ prefix_num["SEG=ES"] = "INAT_PFX_ES"
+ prefix_num["SEG=FS"] = "INAT_PFX_FS"
+ prefix_num["SEG=GS"] = "INAT_PFX_GS"
+ prefix_num["SEG=SS"] = "INAT_PFX_SS"
+ prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
+ prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+ prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+ prefix_num["EVEX"] = "INAT_PFX_EVEX"
+
+ clear_vars()
+}
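+
+# Illustrative note (a hypothetical entry, not quoted from the opcode map):
+# given a line of the form
+#   "NN: op Vx,Wx | op66 Vx,Wx (66) | opf3 Vx,Wx (F3) | opf2 Vx,Wx (F2)"
+# the last-prefix regexes above route the (66) variant into lptable1, the
+# (F3) variant into lptable2 and the (F2) variant into lptable3, while the
+# prefix-less form stays in the main "table" array.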
+
+function semantic_error(msg) {
+ print "Semantic error at " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+function debug(msg) {
+ print "DEBUG: " msg
+}
+
+function array_size(arr, i,c) {
+ c = 0
+ for (i in arr)
+ c++
+ return c
+}
+
+/^Table:/ {
+ print "/* " $0 " */"
+ if (tname != "")
+ semantic_error("Hit Table: before EndTable:.");
+}
+
+/^Referrer:/ {
+ if (NF != 1) {
+ # escape opcode table
+ ref = ""
+ for (i = 2; i <= NF; i++)
+ ref = ref $i
+ eid = escape[ref]
+ tname = sprintf("inat_escape_table_%d", eid)
+ }
+}
+
+/^AVXcode:/ {
+ if (NF != 1) {
+ # AVX/escape opcode table
+ aid = $2
+ if (gaid <= aid)
+ gaid = aid + 1
+ if (tname == "") # AVX only opcode table
+ tname = sprintf("inat_avx_table_%d", $2)
+ }
+ if (aid == -1 && eid == -1) # primary opcode table
+ tname = "inat_primary_table"
+}
+
+/^GrpTable:/ {
+ print "/* " $0 " */"
+ if (!($2 in group))
+ semantic_error("No group: " $2 )
+ gid = group[$2]
+ tname = "inat_group_table_" gid
+}
+
+function print_table(tbl,name,fmt,n)
+{
+ print "const insn_attr_t " name " = {"
+ for (i = 0; i < n; i++) {
+ id = sprintf(fmt, i)
+ if (tbl[id])
+ print " [" id "] = " tbl[id] ","
+ }
+ print "};"
+}
+
+/^EndTable/ {
+ if (gid != -1) {
+ # print group tables
+ if (array_size(table) != 0) {
+ print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,0] = tname
+ }
+ if (array_size(lptable1) != 0) {
+ print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,1] = tname "_1"
+ }
+ if (array_size(lptable2) != 0) {
+ print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,2] = tname "_2"
+ }
+ if (array_size(lptable3) != 0) {
+ print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,3] = tname "_3"
+ }
+ } else {
+ # print primary/escaped tables
+ if (array_size(table) != 0) {
+ print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,0] = tname
+ if (aid >= 0)
+ atable[aid,0] = tname
+ }
+ if (array_size(lptable1) != 0) {
+ print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,1] = tname "_1"
+ if (aid >= 0)
+ atable[aid,1] = tname "_1"
+ }
+ if (array_size(lptable2) != 0) {
+ print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,2] = tname "_2"
+ if (aid >= 0)
+ atable[aid,2] = tname "_2"
+ }
+ if (array_size(lptable3) != 0) {
+ print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,3] = tname "_3"
+ if (aid >= 0)
+ atable[aid,3] = tname "_3"
+ }
+ }
+ print ""
+ clear_vars()
+}
+
+function add_flags(old,new) {
+ if (old && new)
+ return old " | " new
+ else if (old)
+ return old
+ else
+ return new
+}
+
+# convert operands to flags.
+function convert_operands(count,opnd, i,j,imm,mod)
+{
+ imm = null
+ mod = null
+ for (j = 1; j <= count; j++) {
+ i = opnd[j]
+ if (match(i, imm_expr) == 1) {
+ if (!imm_flag[i])
+ semantic_error("Unknown imm opnd: " i)
+ if (imm) {
+ if (i != "Ib")
+ semantic_error("Second IMM error")
+ imm = add_flags(imm, "INAT_SCNDIMM")
+ } else
+ imm = imm_flag[i]
+ } else if (match(i, modrm_expr))
+ mod = "INAT_MODRM"
+ }
+ return add_flags(imm, mod)
+}
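+
+# Illustrative example (hypothetical operands, not quoted from the map): for
+# an operand list such as "Ev,Iz", convert_operands() marks the ModRM operand
+# "Ev" with INAT_MODRM and the immediate "Iz" with
+# INAT_MAKE_IMM(INAT_IMM_VWORD32), returning both flags OR-ed together via
+# add_flags().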
+
+/^[0-9a-f]+\:/ {
+ if (NR == 1)
+ next
+ # get index
+ idx = "0x" substr($1, 1, index($1,":") - 1)
+ if (idx in table)
+ semantic_error("Redefine " idx " in " tname)
+
+ # check if escaped opcode
+ if ("escape" == $2) {
+ if ($3 != "#")
+ semantic_error("No escaped name")
+ ref = ""
+ for (i = 4; i <= NF; i++)
+ ref = ref $i
+ if (ref in escape)
+ semantic_error("Redefine escape (" ref ")")
+ escape[ref] = geid
+ geid++
+ table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+ next
+ }
+
+ variant = null
+ # converts
+ i = 2
+ while (i <= NF) {
+ opcode = $(i++)
+ delete opnds
+ ext = null
+ flags = null
+ opnd = null
+ # parse one opcode
+ if (match($i, opnd_expr)) {
+ opnd = $i
+ count = split($(i++), opnds, ",")
+ flags = convert_operands(count, opnds)
+ }
+ if (match($i, ext_expr))
+ ext = $(i++)
+ if (match($i, sep_expr))
+ i++
+ else if (i < NF)
+ semantic_error($i " is not a separator")
+
+ # check if group opcode
+ if (match(opcode, group_expr)) {
+ if (!(opcode in group)) {
+ group[opcode] = ggid
+ ggid++
+ }
+ flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
+ }
+ # check force (or default) 64-bit
+ if (match(ext, force64_expr))
+ flags = add_flags(flags, "INAT_FORCE64")
+
+ # check REX prefix
+ if (match(opcode, rex_expr))
+ flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
+
+ # check coprocessor escape : TODO
+ if (match(opcode, fpu_expr))
+ flags = add_flags(flags, "INAT_MODRM")
+
+ # check VEX codes
+ if (match(ext, evexonly_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+ else if (match(ext, vexonly_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
+ else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
+ flags = add_flags(flags, "INAT_VEXOK")
+
+ # check prefixes
+ if (match(ext, prefix_expr)) {
+ if (!prefix_num[opcode])
+ semantic_error("Unknown prefix: " opcode)
+ flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
+ }
+ if (length(flags) == 0)
+ continue
+ # check if last prefix
+ if (match(ext, lprefix1_expr)) {
+ lptable1[idx] = add_flags(lptable1[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+ if (match(ext, lprefix2_expr)) {
+ lptable2[idx] = add_flags(lptable2[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+ if (match(ext, lprefix3_expr)) {
+ lptable3[idx] = add_flags(lptable3[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+ if (!match(ext, lprefix_expr)){
+ table[idx] = add_flags(table[idx],flags)
+ }
+ }
+ if (variant)
+ table[idx] = add_flags(table[idx],variant)
+}
+
+END {
+ if (awkchecked != "")
+ exit 1
+ # print escape opcode map's array
+ print "/* Escape opcode map array */"
+ print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < geid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (etable[i,j])
+ print " ["i"]["j"] = "etable[i,j]","
+ print "};\n"
+ # print group opcode map's array
+ print "/* Group opcode map array */"
+ print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < ggid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (gtable[i,j])
+ print " ["i"]["j"] = "gtable[i,j]","
+ print "};\n"
+ # print AVX opcode map's array
+ print "/* AVX opcode map array */"
+ print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < gaid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (atable[i,j])
+ print " ["i"]["j"] = "atable[i,j]","
+ print "};"
+}
+
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index a688a857a7ae..694abc628e9b 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -25,1300 +25,35 @@
* For more information, see tools/objtool/Documentation/stack-validation.txt.
*/

-#include <string.h>
-#include <stdlib.h>
#include <subcmd/parse-options.h>
-
#include "builtin.h"
-#include "elf.h"
-#include "special.h"
-#include "arch.h"
-#include "warn.h"
-
-#include <linux/hashtable.h>
-
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-#define STATE_FP_SAVED 0x1
-#define STATE_FP_SETUP 0x2
-#define STATE_FENTRY 0x4
-
-struct instruction {
- struct list_head list;
- struct hlist_node hash;
- struct section *sec;
- unsigned long offset;
- unsigned int len, state;
- unsigned char type;
- unsigned long immediate;
- bool alt_group, visited, ignore_alts;
- struct symbol *call_dest;
- struct instruction *jump_dest;
- struct list_head alts;
- struct symbol *func;
-};
-
-struct alternative {
- struct list_head list;
- struct instruction *insn;
-};
-
-struct objtool_file {
- struct elf *elf;
- struct list_head insn_list;
- DECLARE_HASHTABLE(insn_hash, 16);
- struct section *rodata, *whitelist;
- bool ignore_unreachables, c_file;
-};
-
-const char *objname;
-static bool nofp;
-
-static struct instruction *find_insn(struct objtool_file *file,
- struct section *sec, unsigned long offset)
-{
- struct instruction *insn;
-
- hash_for_each_possible(file->insn_hash, insn, hash, offset)
- if (insn->sec == sec && insn->offset == offset)
- return insn;
-
- return NULL;
-}
-
-static struct instruction *next_insn_same_sec(struct objtool_file *file,
- struct instruction *insn)
-{
- struct instruction *next = list_next_entry(insn, list);
-
- if (&next->list == &file->insn_list || next->sec != insn->sec)
- return NULL;
-
- return next;
-}
-
-static bool gcov_enabled(struct objtool_file *file)
-{
- struct section *sec;
- struct symbol *sym;
-
- list_for_each_entry(sec, &file->elf->sections, list)
- list_for_each_entry(sym, &sec->symbol_list, list)
- if (!strncmp(sym->name, "__gcov_.", 8))
- return true;
-
- return false;
-}
-
-#define for_each_insn(file, insn) \
- list_for_each_entry(insn, &file->insn_list, list)
-
-#define func_for_each_insn(file, func, insn) \
- for (insn = find_insn(file, func->sec, func->offset); \
- insn && &insn->list != &file->insn_list && \
- insn->sec == func->sec && \
- insn->offset < func->offset + func->len; \
- insn = list_next_entry(insn, list))
-
-#define func_for_each_insn_continue_reverse(file, func, insn) \
- for (insn = list_prev_entry(insn, list); \
- &insn->list != &file->insn_list && \
- insn->sec == func->sec && insn->offset >= func->offset; \
- insn = list_prev_entry(insn, list))
-
-#define sec_for_each_insn_from(file, insn) \
- for (; insn; insn = next_insn_same_sec(file, insn))
-
-
-/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
- struct rela *rela;
- struct instruction *insn;
-
- /* check for STACK_FRAME_NON_STANDARD */
- if (file->whitelist && file->whitelist->rela)
- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
- if (rela->sym->type == STT_SECTION &&
- rela->sym->sec == func->sec &&
- rela->addend == func->offset)
- return true;
- if (rela->sym->type == STT_FUNC && rela->sym == func)
- return true;
- }
-
- /* check if it has a context switching instruction */
- func_for_each_insn(file, func, insn)
- if (insn->type == INSN_CONTEXT_SWITCH)
- return true;
-
- return false;
-}
-
-/*
- * This checks to see if the given function is a "noreturn" function.
- *
- * For global functions which are outside the scope of this object file, we
- * have to keep a manual list of them.
- *
- * For local functions, we have to detect them manually by simply looking for
- * the lack of a return instruction.
- *
- * Returns:
- * -1: error
- * 0: no dead end
- * 1: dead end
- */
-static int __dead_end_function(struct objtool_file *file, struct symbol *func,
- int recursion)
-{
- int i;
- struct instruction *insn;
- bool empty = true;
-
- /*
- * Unfortunately these have to be hard coded because the noreturn
- * attribute isn't provided in ELF data.
- */
- static const char * const global_noreturns[] = {
- "__stack_chk_fail",
- "panic",
- "do_exit",
- "do_task_dead",
- "__module_put_and_exit",
- "complete_and_exit",
- "kvm_spurious_fault",
- "__reiserfs_panic",
- "lbug_with_loc"
- };
-
- if (func->bind == STB_WEAK)
- return 0;
-
- if (func->bind == STB_GLOBAL)
- for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
- if (!strcmp(func->name, global_noreturns[i]))
- return 1;
-
- if (!func->sec)
- return 0;
-
- func_for_each_insn(file, func, insn) {
- empty = false;
-
- if (insn->type == INSN_RETURN)
- return 0;
- }
-
- if (empty)
- return 0;
-
- /*
- * A function can have a sibling call instead of a return. In that
- * case, the function's dead-end status depends on whether the target
- * of the sibling call returns.
- */
- func_for_each_insn(file, func, insn) {
- if (insn->sec != func->sec ||
- insn->offset >= func->offset + func->len)
- break;
-
- if (insn->type == INSN_JUMP_UNCONDITIONAL) {
- struct instruction *dest = insn->jump_dest;
- struct symbol *dest_func;
-
- if (!dest)
- /* sibling call to another file */
- return 0;
-
- if (dest->sec != func->sec ||
- dest->offset < func->offset ||
- dest->offset >= func->offset + func->len) {
- /* local sibling call */
- dest_func = find_symbol_by_offset(dest->sec,
- dest->offset);
- if (!dest_func)
- continue;
-
- if (recursion == 5) {
- WARN_FUNC("infinite recursion (objtool bug!)",
- dest->sec, dest->offset);
- return -1;
- }
-
- return __dead_end_function(file, dest_func,
- recursion + 1);
- }
- }
-
- if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
- /* sibling call */
- return 0;
- }
-
- return 1;
-}
-
-static int dead_end_function(struct objtool_file *file, struct symbol *func)
-{
- return __dead_end_function(file, func, 0);
-}
-
-/*
- * Call the arch-specific instruction decoder for all the instructions and add
- * them to the global instruction list.
- */
-static int decode_instructions(struct objtool_file *file)
-{
- struct section *sec;
- struct symbol *func;
- unsigned long offset;
- struct instruction *insn;
- int ret;
-
- list_for_each_entry(sec, &file->elf->sections, list) {
-
- if (!(sec->sh.sh_flags & SHF_EXECINSTR))
- continue;
-
- for (offset = 0; offset < sec->len; offset += insn->len) {
- insn = malloc(sizeof(*insn));
- memset(insn, 0, sizeof(*insn));
-
- INIT_LIST_HEAD(&insn->alts);
- insn->sec = sec;
- insn->offset = offset;
-
- ret = arch_decode_instruction(file->elf, sec, offset,
- sec->len - offset,
- &insn->len, &insn->type,
- &insn->immediate);
- if (ret)
- return ret;
-
- if (!insn->type || insn->type > INSN_LAST) {
- WARN_FUNC("invalid instruction type %d",
- insn->sec, insn->offset, insn->type);
- return -1;
- }
-
- hash_add(file->insn_hash, &insn->hash, insn->offset);
- list_add_tail(&insn->list, &file->insn_list);
- }
-
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
-
- if (!find_insn(file, sec, func->offset)) {
- WARN("%s(): can't find starting instruction",
- func->name);
- return -1;
- }
-
- func_for_each_insn(file, func, insn)
- if (!insn->func)
- insn->func = func;
- }
- }
-
- return 0;
-}
-
-/*
- * Warnings shouldn't be reported for ignored functions.
- */
-static void add_ignores(struct objtool_file *file)
-{
- struct instruction *insn;
- struct section *sec;
- struct symbol *func;
-
- list_for_each_entry(sec, &file->elf->sections, list) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
-
- if (!ignore_func(file, func))
- continue;
-
- func_for_each_insn(file, func, insn)
- insn->visited = true;
- }
- }
-}
-
-/*
- * FIXME: For now, just ignore any alternatives which add retpolines. This is
- * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
- * But it at least allows objtool to understand the control flow *around* the
- * retpoline.
- */
-static int add_nospec_ignores(struct objtool_file *file)
-{
- struct section *sec;
- struct rela *rela;
- struct instruction *insn;
-
- sec = find_section_by_name(file->elf, ".rela.discard.nospec");
- if (!sec)
- return 0;
-
- list_for_each_entry(rela, &sec->rela_list, list) {
- if (rela->sym->type != STT_SECTION) {
- WARN("unexpected relocation symbol type in %s", sec->name);
- return -1;
- }
-
- insn = find_insn(file, rela->sym->sec, rela->addend);
- if (!insn) {
- WARN("bad .discard.nospec entry");
- return -1;
- }
-
- insn->ignore_alts = true;
- }
-
- return 0;
-}
-
-/*
- * Find the destination instructions for all jumps.
- */
-static int add_jump_destinations(struct objtool_file *file)
-{
- struct instruction *insn;
- struct rela *rela;
- struct section *dest_sec;
- unsigned long dest_off;
-
- for_each_insn(file, insn) {
- if (insn->type != INSN_JUMP_CONDITIONAL &&
- insn->type != INSN_JUMP_UNCONDITIONAL)
- continue;
-
- /* skip ignores */
- if (insn->visited)
- continue;
-
- rela = find_rela_by_dest_range(insn->sec, insn->offset,
- insn->len);
- if (!rela) {
- dest_sec = insn->sec;
- dest_off = insn->offset + insn->len + insn->immediate;
- } else if (rela->sym->type == STT_SECTION) {
- dest_sec = rela->sym->sec;
- dest_off = rela->addend + 4;
- } else if (rela->sym->sec->idx) {
- dest_sec = rela->sym->sec;
- dest_off = rela->sym->sym.st_value + rela->addend + 4;
- } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
- /*
- * Retpoline jumps are really dynamic jumps in
- * disguise, so convert them accordingly.
- */
- insn->type = INSN_JUMP_DYNAMIC;
- continue;
- } else {
- /* sibling call */
- insn->jump_dest = 0;
- continue;
- }
-
- insn->jump_dest = find_insn(file, dest_sec, dest_off);
- if (!insn->jump_dest) {
-
- /*
- * This is a special case where an alt instruction
- * jumps past the end of the section. These are
- * handled later in handle_group_alt().
- */
- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
- continue;
-
- WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
- insn->sec, insn->offset, dest_sec->name,
- dest_off);
- return -1;
- }
- }
-
- return 0;
-}
-
-/*
- * Find the destination instructions for all calls.
- */
-static int add_call_destinations(struct objtool_file *file)
-{
- struct instruction *insn;
- unsigned long dest_off;
- struct rela *rela;
-
- for_each_insn(file, insn) {
- if (insn->type != INSN_CALL)
- continue;
-
- rela = find_rela_by_dest_range(insn->sec, insn->offset,
- insn->len);
- if (!rela) {
- dest_off = insn->offset + insn->len + insn->immediate;
- insn->call_dest = find_symbol_by_offset(insn->sec,
- dest_off);
- /*
- * FIXME: Thanks to retpolines, it's now considered
- * normal for a function to call within itself. So
- * disable this warning for now.
- */
-#if 0
- if (!insn->call_dest) {
- WARN_FUNC("can't find call dest symbol at offset 0x%lx",
- insn->sec, insn->offset, dest_off);
- return -1;
- }
-#endif
- } else if (rela->sym->type == STT_SECTION) {
- insn->call_dest = find_symbol_by_offset(rela->sym->sec,
- rela->addend+4);
- if (!insn->call_dest ||
- insn->call_dest->type != STT_FUNC) {
- WARN_FUNC("can't find call dest symbol at %s+0x%x",
- insn->sec, insn->offset,
- rela->sym->sec->name,
- rela->addend + 4);
- return -1;
- }
- } else
- insn->call_dest = rela->sym;
- }
-
- return 0;
-}
-
-/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- * instruction at the end so that validate_branch() skips all the original
- * replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero. In
- * that case the old instructions are replaced with noops. We simulate that
- * by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- * conditionally jumps to the _end_ of the entry. We have to modify these
- * jumps' destinations to point back to .text rather than the end of the
- * entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- * which is a "very very small percentage of machines".
- */
-static int handle_group_alt(struct objtool_file *file,
- struct special_alt *special_alt,
- struct instruction *orig_insn,
- struct instruction **new_insn)
-{
- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
- unsigned long dest_off;
-
- last_orig_insn = NULL;
- insn = orig_insn;
- sec_for_each_insn_from(file, insn) {
- if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
- break;
-
- if (special_alt->skip_orig)
- insn->type = INSN_NOP;
-
- insn->alt_group = true;
- last_orig_insn = insn;
- }
-
- if (!next_insn_same_sec(file, last_orig_insn)) {
- WARN("%s: don't know how to handle alternatives at end of section",
- special_alt->orig_sec->name);
- return -1;
- }
-
- fake_jump = malloc(sizeof(*fake_jump));
- if (!fake_jump) {
- WARN("malloc failed");
- return -1;
- }
- memset(fake_jump, 0, sizeof(*fake_jump));
- INIT_LIST_HEAD(&fake_jump->alts);
- fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = -1;
- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
-
- if (!special_alt->new_len) {
- *new_insn = fake_jump;
- return 0;
- }
-
- last_new_insn = NULL;
- insn = *new_insn;
- sec_for_each_insn_from(file, insn) {
- if (insn->offset >= special_alt->new_off + special_alt->new_len)
- break;
-
- last_new_insn = insn;
-
- if (insn->type != INSN_JUMP_CONDITIONAL &&
- insn->type != INSN_JUMP_UNCONDITIONAL)
- continue;
-
- if (!insn->immediate)
- continue;
-
- dest_off = insn->offset + insn->len + insn->immediate;
- if (dest_off == special_alt->new_off + special_alt->new_len)
- insn->jump_dest = fake_jump;
-
- if (!insn->jump_dest) {
- WARN_FUNC("can't find alternative jump destination",
- insn->sec, insn->offset);
- return -1;
- }
- }
-
- if (!last_new_insn) {
- WARN_FUNC("can't find last new alternative instruction",
- special_alt->new_sec, special_alt->new_off);
- return -1;
- }
-
- list_add(&fake_jump->list, &last_new_insn->list);
-
- return 0;
-}
-
-/*
- * A jump table entry can either convert a nop to a jump or a jump to a nop.
- * If the original instruction is a jump, make the alt entry an effective nop
- * by just skipping the original instruction.
- */
-static int handle_jump_alt(struct objtool_file *file,
- struct special_alt *special_alt,
- struct instruction *orig_insn,
- struct instruction **new_insn)
-{
- if (orig_insn->type == INSN_NOP)
- return 0;
-
- if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
- WARN_FUNC("unsupported instruction at jump label",
- orig_insn->sec, orig_insn->offset);
- return -1;
- }
-
- *new_insn = list_next_entry(orig_insn, list);
- return 0;
-}
-
-/*
- * Read all the special sections which have alternate instructions which can be
- * patched in or redirected to at runtime. Each instruction having alternate
- * instruction(s) has them added to its insn->alts list, which will be
- * traversed in validate_branch().
- */
-static int add_special_section_alts(struct objtool_file *file)
-{
- struct list_head special_alts;
- struct instruction *orig_insn, *new_insn;
- struct special_alt *special_alt, *tmp;
- struct alternative *alt;
- int ret;
-
- ret = special_get_alts(file->elf, &special_alts);
- if (ret)
- return ret;
-
- list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-
- orig_insn = find_insn(file, special_alt->orig_sec,
- special_alt->orig_off);
- if (!orig_insn) {
- WARN_FUNC("special: can't find orig instruction",
- special_alt->orig_sec, special_alt->orig_off);
- ret = -1;
- goto out;
- }
-
- /* Ignore retpoline alternatives. */
- if (orig_insn->ignore_alts)
- continue;
-
- new_insn = NULL;
- if (!special_alt->group || special_alt->new_len) {
- new_insn = find_insn(file, special_alt->new_sec,
- special_alt->new_off);
- if (!new_insn) {
- WARN_FUNC("special: can't find new instruction",
- special_alt->new_sec,
- special_alt->new_off);
- ret = -1;
- goto out;
- }
- }
+#include "check.h"

- if (special_alt->group) {
- ret = handle_group_alt(file, special_alt, orig_insn,
- &new_insn);
- if (ret)
- goto out;
- } else if (special_alt->jump_or_nop) {
- ret = handle_jump_alt(file, special_alt, orig_insn,
- &new_insn);
- if (ret)
- goto out;
- }
+bool no_fp, no_unreachable, retpoline, module;

- alt = malloc(sizeof(*alt));
- if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
- }
-
- alt->insn = new_insn;
- list_add_tail(&alt->list, &orig_insn->alts);
-
- list_del(&special_alt->list);
- free(special_alt);
- }
-
-out:
- return ret;
-}
-
-static int add_switch_table(struct objtool_file *file, struct symbol *func,
- struct instruction *insn, struct rela *table,
- struct rela *next_table)
-{
- struct rela *rela = table;
- struct instruction *alt_insn;
- struct alternative *alt;
-
- list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
- if (rela == next_table)
- break;
-
- if (rela->sym->sec != insn->sec ||
- rela->addend <= func->offset ||
- rela->addend >= func->offset + func->len)
- break;
-
- alt_insn = find_insn(file, insn->sec, rela->addend);
- if (!alt_insn) {
- WARN("%s: can't find instruction at %s+0x%x",
- file->rodata->rela->name, insn->sec->name,
- rela->addend);
- return -1;
- }
-
- alt = malloc(sizeof(*alt));
- if (!alt) {
- WARN("malloc failed");
- return -1;
- }
-
- alt->insn = alt_insn;
- list_add_tail(&alt->list, &insn->alts);
- }
-
- return 0;
-}
-
-/*
- * find_switch_table() - Given a dynamic jump, find the switch jump table in
- * .rodata associated with it.
- *
- * There are 3 basic patterns:
- *
- * 1. jmpq *[rodata addr](,%reg,8)
- *
- * This is the most common case by far. It jumps to an address in a simple
- * jump table which is stored in .rodata.
- *
- * 2. jmpq *[rodata addr](%rip)
- *
- * This is caused by a rare GCC quirk, currently only seen in three driver
- * functions in the kernel, only with certain obscure non-distro configs.
- *
- * As part of an optimization, GCC makes a copy of an existing switch jump
- * table, modifies it, and then hard-codes the jump (albeit with an indirect
- * jump) to use a single entry in the table. The rest of the jump table and
- * some of its jump targets remain as dead code.
- *
- * In such a case we can just crudely ignore all unreachable instruction
- * warnings for the entire object file. Ideally we would just ignore them
- * for the function, but that would require redesigning the code quite a
- * bit. And honestly that's just not worth doing: unreachable instruction
- * warnings are of questionable value anyway, and this is such a rare issue.
- *
- * 3. mov [rodata addr],%reg1
- * ... some instructions ...
- * jmpq *(%reg1,%reg2,8)
- *
- * This is a fairly uncommon pattern which is new for GCC 6. As of this
- * writing, there are 11 occurrences of it in the allmodconfig kernel.
- *
- * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
- * ensure the same register is used in the mov and jump instructions.
- */
-static struct rela *find_switch_table(struct objtool_file *file,
- struct symbol *func,
- struct instruction *insn)
-{
- struct rela *text_rela, *rodata_rela;
- struct instruction *orig_insn = insn;
-
- text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
- if (text_rela && text_rela->sym == file->rodata->sym) {
- /* case 1 */
- rodata_rela = find_rela_by_dest(file->rodata,
- text_rela->addend);
- if (rodata_rela)
- return rodata_rela;
-
- /* case 2 */
- rodata_rela = find_rela_by_dest(file->rodata,
- text_rela->addend + 4);
- if (!rodata_rela)
- return NULL;
- file->ignore_unreachables = true;
- return rodata_rela;
- }
-
- /* case 3 */
- func_for_each_insn_continue_reverse(file, func, insn) {
- if (insn->type == INSN_JUMP_DYNAMIC)
- break;
-
- /* allow small jumps within the range */
- if (insn->type == INSN_JUMP_UNCONDITIONAL &&
- insn->jump_dest &&
- (insn->jump_dest->offset <= insn->offset ||
- insn->jump_dest->offset > orig_insn->offset))
- break;
-
- /* look for a relocation which references .rodata */
- text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
- insn->len);
- if (!text_rela || text_rela->sym != file->rodata->sym)
- continue;
-
- /*
- * Make sure the .rodata address isn't associated with a
- * symbol. gcc jump tables are anonymous data.
- */
- if (find_symbol_containing(file->rodata, text_rela->addend))
- continue;
-
- return find_rela_by_dest(file->rodata, text_rela->addend);
- }
-
- return NULL;
-}
-
-static int add_func_switch_tables(struct objtool_file *file,
- struct symbol *func)
-{
- struct instruction *insn, *prev_jump = NULL;
- struct rela *rela, *prev_rela = NULL;
- int ret;
-
- func_for_each_insn(file, func, insn) {
- if (insn->type != INSN_JUMP_DYNAMIC)
- continue;
-
- rela = find_switch_table(file, func, insn);
- if (!rela)
- continue;
-
- /*
- * We found a switch table, but we don't know yet how big it
- * is. Don't add it until we reach the end of the function or
- * the beginning of another switch table in the same function.
- */
- if (prev_jump) {
- ret = add_switch_table(file, func, prev_jump, prev_rela,
- rela);
- if (ret)
- return ret;
- }
-
- prev_jump = insn;
- prev_rela = rela;
- }
-
- if (prev_jump) {
- ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/*
- * For some switch statements, gcc generates a jump table in the .rodata
- * section which contains a list of addresses within the function to jump to.
- * This finds these jump tables and adds them to the insn->alts lists.
- */
-static int add_switch_table_alts(struct objtool_file *file)
-{
- struct section *sec;
- struct symbol *func;
- int ret;
-
- if (!file->rodata || !file->rodata->rela)
- return 0;
-
- list_for_each_entry(sec, &file->elf->sections, list) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
-
- ret = add_func_switch_tables(file, func);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int decode_sections(struct objtool_file *file)
-{
- int ret;
-
- ret = decode_instructions(file);
- if (ret)
- return ret;
-
- add_ignores(file);
-
- ret = add_nospec_ignores(file);
- if (ret)
- return ret;
-
- ret = add_jump_destinations(file);
- if (ret)
- return ret;
-
- ret = add_call_destinations(file);
- if (ret)
- return ret;
-
- ret = add_special_section_alts(file);
- if (ret)
- return ret;
-
- ret = add_switch_table_alts(file);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static bool is_fentry_call(struct instruction *insn)
-{
- if (insn->type == INSN_CALL &&
- insn->call_dest->type == STT_NOTYPE &&
- !strcmp(insn->call_dest->name, "__fentry__"))
- return true;
-
- return false;
-}
-
-static bool has_modified_stack_frame(struct instruction *insn)
-{
- return (insn->state & STATE_FP_SAVED) ||
- (insn->state & STATE_FP_SETUP);
-}
-
-static bool has_valid_stack_frame(struct instruction *insn)
-{
- return (insn->state & STATE_FP_SAVED) &&
- (insn->state & STATE_FP_SETUP);
-}
-
-static unsigned int frame_state(unsigned long state)
-{
- return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
-}
-
-/*
- * Follow the branch starting at the given instruction, and recursively follow
- * any other branches (jumps). Meanwhile, track the frame pointer state at
- * each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
- */
-static int validate_branch(struct objtool_file *file,
- struct instruction *first, unsigned char first_state)
-{
- struct alternative *alt;
- struct instruction *insn;
- struct section *sec;
- struct symbol *func = NULL;
- unsigned char state;
- int ret;
-
- insn = first;
- sec = insn->sec;
- state = first_state;
-
- if (insn->alt_group && list_empty(&insn->alts)) {
- WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
- sec, insn->offset);
- return 1;
- }
-
- while (1) {
- if (file->c_file && insn->func) {
- if (func && func != insn->func) {
- WARN("%s() falls through to next function %s()",
- func->name, insn->func->name);
- return 1;
- }
-
- func = insn->func;
- }
-
- if (insn->visited) {
- if (frame_state(insn->state) != frame_state(state)) {
- WARN_FUNC("frame pointer state mismatch",
- sec, insn->offset);
- return 1;
- }
-
- return 0;
- }
-
- insn->visited = true;
- insn->state = state;
-
- list_for_each_entry(alt, &insn->alts, list) {
- ret = validate_branch(file, alt->insn, state);
- if (ret)
- return 1;
- }
-
- switch (insn->type) {
-
- case INSN_FP_SAVE:
- if (!nofp) {
- if (state & STATE_FP_SAVED) {
- WARN_FUNC("duplicate frame pointer save",
- sec, insn->offset);
- return 1;
- }
- state |= STATE_FP_SAVED;
- }
- break;
-
- case INSN_FP_SETUP:
- if (!nofp) {
- if (state & STATE_FP_SETUP) {
- WARN_FUNC("duplicate frame pointer setup",
- sec, insn->offset);
- return 1;
- }
- state |= STATE_FP_SETUP;
- }
- break;
-
- case INSN_FP_RESTORE:
- if (!nofp) {
- if (has_valid_stack_frame(insn))
- state &= ~STATE_FP_SETUP;
-
- state &= ~STATE_FP_SAVED;
- }
- break;
-
- case INSN_RETURN:
- if (!nofp && has_modified_stack_frame(insn)) {
- WARN_FUNC("return without frame pointer restore",
- sec, insn->offset);
- return 1;
- }
- return 0;
-
- case INSN_CALL:
- if (is_fentry_call(insn)) {
- state |= STATE_FENTRY;
- break;
- }
-
- ret = dead_end_function(file, insn->call_dest);
- if (ret == 1)
- return 0;
- if (ret == -1)
- return 1;
-
- /* fallthrough */
- case INSN_CALL_DYNAMIC:
- if (!nofp && !has_valid_stack_frame(insn)) {
- WARN_FUNC("call without frame pointer save/setup",
- sec, insn->offset);
- return 1;
- }
- break;
-
- case INSN_JUMP_CONDITIONAL:
- case INSN_JUMP_UNCONDITIONAL:
- if (insn->jump_dest) {
- ret = validate_branch(file, insn->jump_dest,
- state);
- if (ret)
- return 1;
- } else if (has_modified_stack_frame(insn)) {
- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
- sec, insn->offset);
- return 1;
- } /* else it's a sibling call */
-
- if (insn->type == INSN_JUMP_UNCONDITIONAL)
- return 0;
-
- break;
-
- case INSN_JUMP_DYNAMIC:
- if (list_empty(&insn->alts) &&
- has_modified_stack_frame(insn)) {
- WARN_FUNC("sibling call from callable instruction with changed frame pointer",
- sec, insn->offset);
- return 1;
- }
-
- return 0;
-
- case INSN_BUG:
- return 0;
-
- default:
- break;
- }
-
- insn = next_insn_same_sec(file, insn);
- if (!insn) {
- WARN("%s: unexpected end of section", sec->name);
- return 1;
- }
- }
-
- return 0;
-}
-
-static bool is_kasan_insn(struct instruction *insn)
-{
- return (insn->type == INSN_CALL &&
- !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
-}
-
-static bool is_ubsan_insn(struct instruction *insn)
-{
- return (insn->type == INSN_CALL &&
- !strcmp(insn->call_dest->name,
- "__ubsan_handle_builtin_unreachable"));
-}
-
-static bool ignore_unreachable_insn(struct symbol *func,
- struct instruction *insn)
-{
- int i;
-
- if (insn->type == INSN_NOP)
- return true;
-
- /*
- * Check if this (or a subsequent) instruction is related to
- * CONFIG_UBSAN or CONFIG_KASAN.
- *
- * End the search at 5 instructions to avoid going into the weeds.
- */
- for (i = 0; i < 5; i++) {
-
- if (is_kasan_insn(insn) || is_ubsan_insn(insn))
- return true;
-
- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
- insn = insn->jump_dest;
- continue;
- }
-
- if (insn->offset + insn->len >= func->offset + func->len)
- break;
- insn = list_next_entry(insn, list);
- }
-
- return false;
-}
-
-static int validate_functions(struct objtool_file *file)
-{
- struct section *sec;
- struct symbol *func;
- struct instruction *insn;
- int ret, warnings = 0;
-
- list_for_each_entry(sec, &file->elf->sections, list) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
-
- insn = find_insn(file, sec, func->offset);
- if (!insn)
- continue;
-
- ret = validate_branch(file, insn, 0);
- warnings += ret;
- }
- }
-
- list_for_each_entry(sec, &file->elf->sections, list) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
-
- func_for_each_insn(file, func, insn) {
- if (insn->visited)
- continue;
-
- insn->visited = true;
-
- if (file->ignore_unreachables || warnings ||
- ignore_unreachable_insn(func, insn))
- continue;
-
- /*
- * gcov produces a lot of unreachable
- * instructions. If we get an unreachable
- * warning and the file has gcov enabled, just
- * ignore it, and all other such warnings for
- * the file.
- */
- if (!file->ignore_unreachables &&
- gcov_enabled(file)) {
- file->ignore_unreachables = true;
- continue;
- }
-
- WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
- warnings++;
- }
- }
- }
-
- return warnings;
-}
-
-static int validate_uncallable_instructions(struct objtool_file *file)
-{
- struct instruction *insn;
- int warnings = 0;
-
- for_each_insn(file, insn) {
- if (!insn->visited && insn->type == INSN_RETURN) {
-
- /*
- * Don't warn about call instructions in unvisited
- * retpoline alternatives.
- */
- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
- continue;
-
- WARN_FUNC("return instruction outside of a callable function",
- insn->sec, insn->offset);
- warnings++;
- }
- }
-
- return warnings;
-}
-
-static void cleanup(struct objtool_file *file)
-{
- struct instruction *insn, *tmpinsn;
- struct alternative *alt, *tmpalt;
-
- list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
- list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
- list_del(&alt->list);
- free(alt);
- }
- list_del(&insn->list);
- hash_del(&insn->hash);
- free(insn);
- }
- elf_close(file->elf);
-}
-
-const char * const check_usage[] = {
+static const char * const check_usage[] = {
"objtool check [<options>] file.o",
NULL,
};

+const struct option check_options[] = {
+ OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
+ OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+ OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+ OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+ OPT_END(),
+};
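+
+/*
+ * Illustrative invocation (an assumption based only on the options declared
+ * above, not taken from the tool's documentation):
+ *
+ *   objtool check --no-fp --retpoline file.o
+ *
+ * would skip frame pointer validation while still validating retpoline
+ * assumptions for file.o.
+ */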
+
int cmd_check(int argc, const char **argv)
{
- struct objtool_file file;
- int ret, warnings = 0;
-
- const struct option options[] = {
- OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
- OPT_END(),
- };
+ const char *objname;

- argc = parse_options(argc, argv, options, check_usage, 0);
+ argc = parse_options(argc, argv, check_options, check_usage, 0);

if (argc != 1)
- usage_with_options(check_usage, options);
+ usage_with_options(check_usage, check_options);

objname = argv[0];

- file.elf = elf_open(objname);
- if (!file.elf) {
- fprintf(stderr, "error reading elf file %s\n", objname);
- return 1;
- }
-
- INIT_LIST_HEAD(&file.insn_list);
- hash_init(file.insn_hash);
- file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
- file.rodata = find_section_by_name(file.elf, ".rodata");
- file.ignore_unreachables = false;
- file.c_file = find_section_by_name(file.elf, ".comment");
-
- ret = decode_sections(&file);
- if (ret < 0)
- goto out;
- warnings += ret;
-
- ret = validate_functions(&file);
- if (ret < 0)
- goto out;
- warnings += ret;
-
- ret = validate_uncallable_instructions(&file);
- if (ret < 0)
- goto out;
- warnings += ret;
-
-out:
- cleanup(&file);
-
- /* ignore warnings for now until we get all the code cleaned up */
- if (ret || warnings)
- return 0;
- return 0;
+ return check(objname, false);
}
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
new file mode 100644
index 000000000000..77ea2b97117d
--- /dev/null
+++ b/tools/objtool/builtin-orc.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * objtool orc:
+ *
+ * This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
+ * sections to it, which are used by the in-kernel ORC unwinder.
+ *
+ * This command is a superset of "objtool check".
+ */
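+
+/*
+ * Illustrative usage (assumed from the usage strings and subcommand handling
+ * below):
+ *
+ *   objtool orc generate file.o   - run the checks and write the ORC sections
+ *   objtool orc dump file.o       - print the ORC data recorded in file.o
+ */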
+
+#include <string.h>
+#include "builtin.h"
+#include "check.h"
+
+
+static const char *orc_usage[] = {
+ "objtool orc generate [<options>] file.o",
+ "objtool orc dump file.o",
+ NULL,
+};
+
+int cmd_orc(int argc, const char **argv)
+{
+ const char *objname;
+
+ argc--; argv++;
+ if (argc <= 0)
+ usage_with_options(orc_usage, check_options);
+
+ if (!strncmp(argv[0], "gen", 3)) {
+ argc = parse_options(argc, argv, check_options, orc_usage, 0);
+ if (argc != 1)
+ usage_with_options(orc_usage, check_options);
+
+ objname = argv[0];
+
+ return check(objname, true);
+ }
+
+ if (!strcmp(argv[0], "dump")) {
+ if (argc != 2)
+ usage_with_options(orc_usage, check_options);
+
+ objname = argv[1];
+
+ return orc_dump(objname);
+ }
+
+ usage_with_options(orc_usage, check_options);
+
+ return 0;
+}
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 34d2ba78a616..28ff40e19a14 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -17,6 +17,12 @@
#ifndef _BUILTIN_H
#define _BUILTIN_H

+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
extern int cmd_check(int argc, const char **argv);
+extern int cmd_orc(int argc, const char **argv);

#endif /* _BUILTIN_H */
diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
new file mode 100644
index 000000000000..2fe883c665c7
--- /dev/null
+++ b/tools/objtool/cfi.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OBJTOOL_CFI_H
+#define _OBJTOOL_CFI_H
+
+#define CFI_UNDEFINED -1
+#define CFI_CFA -2
+#define CFI_SP_INDIRECT -3
+#define CFI_BP_INDIRECT -4
+
+#define CFI_AX 0
+#define CFI_DX 1
+#define CFI_CX 2
+#define CFI_BX 3
+#define CFI_SI 4
+#define CFI_DI 5
+#define CFI_BP 6
+#define CFI_SP 7
+#define CFI_R8 8
+#define CFI_R9 9
+#define CFI_R10 10
+#define CFI_R11 11
+#define CFI_R12 12
+#define CFI_R13 13
+#define CFI_R14 14
+#define CFI_R15 15
+#define CFI_RA 16
+#define CFI_NUM_REGS 17
+
+struct cfi_reg {
+ int base;
+ int offset;
+};
+
+struct cfi_state {
+ struct cfi_reg cfa;
+ struct cfi_reg regs[CFI_NUM_REGS];
+};
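+
+/*
+ * Illustrative example (an x86-64 assumption, not spelled out in this
+ * header): at a function's entry point the caller's stack pointer sits just
+ * above the pushed return address, so an initial state would typically be
+ * cfa = { .base = CFI_SP, .offset = 8 } with the return address stored at
+ * CFI_CFA - 8.
+ */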
+
+#endif /* _OBJTOOL_CFI_H */
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
new file mode 100644
index 000000000000..e128d1c71c30
--- /dev/null
+++ b/tools/objtool/check.c
@@ -0,0 +1,2209 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "builtin.h"
+#include "check.h"
+#include "elf.h"
+#include "special.h"
+#include "arch.h"
+#include "warn.h"
+
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+
+struct alternative {
+ struct list_head list;
+ struct instruction *insn;
+};
+
+const char *objname;
+struct cfi_state initial_func_cfi;
+
+struct instruction *find_insn(struct objtool_file *file,
+ struct section *sec, unsigned long offset)
+{
+ struct instruction *insn;
+
+ hash_for_each_possible(file->insn_hash, insn, hash, offset)
+ if (insn->sec == sec && insn->offset == offset)
+ return insn;
+
+ return NULL;
+}
+
+static struct instruction *next_insn_same_sec(struct objtool_file *file,
+ struct instruction *insn)
+{
+ struct instruction *next = list_next_entry(insn, list);
+
+ if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+ return NULL;
+
+ return next;
+}
+
+static struct instruction *next_insn_same_func(struct objtool_file *file,
+ struct instruction *insn)
+{
+ struct instruction *next = list_next_entry(insn, list);
+ struct symbol *func = insn->func;
+
+ if (!func)
+ return NULL;
+
+ if (&next->list != &file->insn_list && next->func == func)
+ return next;
+
+ /* Check if we're already in the subfunction: */
+ if (func == func->cfunc)
+ return NULL;
+
+ /* Move to the subfunction: */
+ return find_insn(file, func->cfunc->sec, func->cfunc->offset);
+}
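+
+/*
+ * Note (an assumption about the symbol layout, not stated in this file):
+ * func->cfunc refers to a split-out child part of the function (such as a
+ * compiler-generated ".cold" subfunction), and func == func->cfunc when no
+ * such part exists, which is why the walk above either stops or continues
+ * into the subfunction's instructions.
+ */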
+
+#define func_for_each_insn_all(file, func, insn) \
+ for (insn = find_insn(file, func->sec, func->offset); \
+ insn; \
+ insn = next_insn_same_func(file, insn))
+
+#define func_for_each_insn(file, func, insn) \
+ for (insn = find_insn(file, func->sec, func->offset); \
+ insn && &insn->list != &file->insn_list && \
+ insn->sec == func->sec && \
+ insn->offset < func->offset + func->len; \
+ insn = list_next_entry(insn, list))
+
+#define func_for_each_insn_continue_reverse(file, func, insn) \
+ for (insn = list_prev_entry(insn, list); \
+ &insn->list != &file->insn_list && \
+ insn->sec == func->sec && insn->offset >= func->offset; \
+ insn = list_prev_entry(insn, list))
+
+#define sec_for_each_insn_from(file, insn) \
+ for (; insn; insn = next_insn_same_sec(file, insn))
+
+#define sec_for_each_insn_continue(file, insn) \
+ for (insn = next_insn_same_sec(file, insn); insn; \
+ insn = next_insn_same_sec(file, insn))
+
+/*
+ * Check if the function has been manually whitelisted with the
+ * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
+ * due to its use of a context switching instruction.
+ */
+static bool ignore_func(struct objtool_file *file, struct symbol *func)
+{
+ struct rela *rela;
+
+ /* check for STACK_FRAME_NON_STANDARD */
+ if (file->whitelist && file->whitelist->rela)
+ list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+ if (rela->sym->type == STT_SECTION &&
+ rela->sym->sec == func->sec &&
+ rela->addend == func->offset)
+ return true;
+ if (rela->sym->type == STT_FUNC && rela->sym == func)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This checks to see if the given function is a "noreturn" function.
+ *
+ * For global functions which are outside the scope of this object file, we
+ * have to keep a manual list of them.
+ *
+ * For local functions, we have to detect them manually by simply looking for
+ * the lack of a return instruction.
+ *
+ * Returns:
+ * -1: error
+ * 0: no dead end
+ * 1: dead end
+ */
+static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+ int recursion)
+{
+ int i;
+ struct instruction *insn;
+ bool empty = true;
+
+ /*
+ * Unfortunately these have to be hard coded because the noreturn
+ * attribute isn't provided in ELF data.
+ */
+ static const char * const global_noreturns[] = {
+ "__stack_chk_fail",
+ "panic",
+ "do_exit",
+ "do_task_dead",
+ "__module_put_and_exit",
+ "complete_and_exit",
+ "kvm_spurious_fault",
+ "__reiserfs_panic",
+ "lbug_with_loc",
+ "fortify_panic",
+ };
+
+ if (func->bind == STB_WEAK)
+ return 0;
+
+ if (func->bind == STB_GLOBAL)
+ for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
+ if (!strcmp(func->name, global_noreturns[i]))
+ return 1;
+
+ if (!func->len)
+ return 0;
+
+ insn = find_insn(file, func->sec, func->offset);
+ if (!insn->func)
+ return 0;
+
+ func_for_each_insn_all(file, func, insn) {
+ empty = false;
+
+ if (insn->type == INSN_RETURN)
+ return 0;
+ }
+
+ if (empty)
+ return 0;
+
+ /*
+ * A function can have a sibling call instead of a return. In that
+ * case, the function's dead-end status depends on whether the target
+ * of the sibling call returns.
+ */
+ func_for_each_insn_all(file, func, insn) {
+ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+ struct instruction *dest = insn->jump_dest;
+
+ if (!dest)
+ /* sibling call to another file */
+ return 0;
+
+ if (dest->func && dest->func->pfunc != insn->func->pfunc) {
+
+ /* local sibling call */
+ if (recursion == 5) {
+ /*
+ * Infinite recursion: two functions
+ * have sibling calls to each other.
+ * This is a very rare case. It means
+ * they aren't dead ends.
+ */
+ return 0;
+ }
+
+ return __dead_end_function(file, dest->func,
+ recursion + 1);
+ }
+ }
+
+ if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
+ /* sibling call */
+ return 0;
+ }
+
+ return 1;
+}
+
+static int dead_end_function(struct objtool_file *file, struct symbol *func)
+{
+ return __dead_end_function(file, func, 0);
+}
+
+static void clear_insn_state(struct insn_state *state)
+{
+ int i;
+
+ memset(state, 0, sizeof(*state));
+ state->cfa.base = CFI_UNDEFINED;
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ state->regs[i].base = CFI_UNDEFINED;
+ state->vals[i].base = CFI_UNDEFINED;
+ }
+ state->drap_reg = CFI_UNDEFINED;
+ state->drap_offset = -1;
+}
+
+/*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+ */
+static int decode_instructions(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *func;
+ unsigned long offset;
+ struct instruction *insn;
+ int ret;
+
+ for_each_sec(file, sec) {
+
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ continue;
+
+ if (strcmp(sec->name, ".altinstr_replacement") &&
+ strcmp(sec->name, ".altinstr_aux") &&
+ strncmp(sec->name, ".discard.", 9))
+ sec->text = true;
+
+ for (offset = 0; offset < sec->len; offset += insn->len) {
+ insn = malloc(sizeof(*insn));
+ if (!insn) {
+ WARN("malloc failed");
+ return -1;
+ }
+ memset(insn, 0, sizeof(*insn));
+ INIT_LIST_HEAD(&insn->alts);
+ clear_insn_state(&insn->state);
+
+ insn->sec = sec;
+ insn->offset = offset;
+
+ ret = arch_decode_instruction(file->elf, sec, offset,
+ sec->len - offset,
+ &insn->len, &insn->type,
+ &insn->immediate,
+ &insn->stack_op);
+ if (ret)
+ goto err;
+
+ if (!insn->type || insn->type > INSN_LAST) {
+ WARN_FUNC("invalid instruction type %d",
+ insn->sec, insn->offset, insn->type);
+ ret = -1;
+ goto err;
+ }
+
+ hash_add(file->insn_hash, &insn->hash, insn->offset);
+ list_add_tail(&insn->list, &file->insn_list);
+ }
+
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ if (!find_insn(file, sec, func->offset)) {
+ WARN("%s(): can't find starting instruction",
+ func->name);
+ return -1;
+ }
+
+ func_for_each_insn(file, func, insn)
+ if (!insn->func)
+ insn->func = func;
+ }
+ }
+
+ return 0;
+
+err:
+ free(insn);
+ return ret;
+}
+
+/*
+ * Mark "ud2" instructions and manually annotated dead ends.
+ */
+static int add_dead_ends(struct objtool_file *file)
+{
+ struct section *sec;
+ struct rela *rela;
+ struct instruction *insn;
+ bool found;
+
+ /*
+ * By default, "ud2" is a dead end unless otherwise annotated, because
+ * GCC 7 inserts it for certain divide-by-zero cases.
+ */
+ for_each_insn(file, insn)
+ if (insn->type == INSN_BUG)
+ insn->dead_end = true;
+
+ /*
+ * Check for manually annotated dead ends.
+ */
+ sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+ if (!sec)
+ goto reachable;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (insn)
+ insn = list_prev_entry(insn, list);
+ else if (rela->addend == rela->sym->sec->len) {
+ found = false;
+ list_for_each_entry_reverse(insn, &file->insn_list, list) {
+ if (insn->sec == rela->sym->sec) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ WARN("can't find unreachable insn at %s+0x%x",
+ rela->sym->sec->name, rela->addend);
+ return -1;
+ }
+ } else {
+ WARN("can't find unreachable insn at %s+0x%x",
+ rela->sym->sec->name, rela->addend);
+ return -1;
+ }
+
+ insn->dead_end = true;
+ }
+
+reachable:
+ /*
+ * These manually annotated reachable checks are needed for GCC 4.4,
+ * where the Linux unreachable() macro isn't supported. In that case
+ * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
+ * not a dead end.
+ */
+ sec = find_section_by_name(file->elf, ".rela.discard.reachable");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (insn)
+ insn = list_prev_entry(insn, list);
+ else if (rela->addend == rela->sym->sec->len) {
+ found = false;
+ list_for_each_entry_reverse(insn, &file->insn_list, list) {
+ if (insn->sec == rela->sym->sec) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ WARN("can't find reachable insn at %s+0x%x",
+ rela->sym->sec->name, rela->addend);
+ return -1;
+ }
+ } else {
+ WARN("can't find reachable insn at %s+0x%x",
+ rela->sym->sec->name, rela->addend);
+ return -1;
+ }
+
+ insn->dead_end = false;
+ }
+
+ return 0;
+}
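
For illustration only (hypothetical example, not part of this patch): the most direct way to get such a "ud2" out of the compiler is __builtin_trap(), which GCC compiles to "ud2" on x86-64; this is the kind of instruction add_dead_ends() marks as a dead end unless a .discard.reachable annotation says otherwise.

	void trap_example(void)
	{
		__builtin_trap();	/* compiled to "ud2" on x86-64; does not return */
	}
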
+
+/*
+ * Warnings shouldn't be reported for ignored functions.
+ */
+static void add_ignores(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ struct symbol *func;
+
+ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ if (!ignore_func(file, func))
+ continue;
+
+ func_for_each_insn_all(file, func, insn)
+ insn->ignore = true;
+ }
+ }
+}
+
+/*
+ * FIXME: For now, just ignore any alternatives which add retpolines. This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+ struct section *sec;
+ struct rela *rela;
+ struct instruction *insn;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("bad .discard.nospec entry");
+ return -1;
+ }
+
+ insn->ignore_alts = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the destination instructions for all jumps.
+ */
+static int add_jump_destinations(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct rela *rela;
+ struct section *dest_sec;
+ unsigned long dest_off;
+
+ for_each_insn(file, insn) {
+ if (insn->type != INSN_JUMP_CONDITIONAL &&
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+
+ if (insn->ignore)
+ continue;
+
+ rela = find_rela_by_dest_range(insn->sec, insn->offset,
+ insn->len);
+ if (!rela) {
+ dest_sec = insn->sec;
+ dest_off = insn->offset + insn->len + insn->immediate;
+ } else if (rela->sym->type == STT_SECTION) {
+ dest_sec = rela->sym->sec;
+ dest_off = rela->addend + 4;
+ } else if (rela->sym->sec->idx) {
+ dest_sec = rela->sym->sec;
+ dest_off = rela->sym->sym.st_value + rela->addend + 4;
+ } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+ /*
+ * Retpoline jumps are really dynamic jumps in
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
+ insn->retpoline_safe = true;
+ continue;
+ } else {
+ /* sibling call */
+ insn->jump_dest = 0;
+ continue;
+ }
+
+ insn->jump_dest = find_insn(file, dest_sec, dest_off);
+ if (!insn->jump_dest) {
+
+ /*
+ * This is a special case where an alt instruction
+ * jumps past the end of the section. These are
+ * handled later in handle_group_alt().
+ */
+ if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+ continue;
+
+ WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
+ insn->sec, insn->offset, dest_sec->name,
+ dest_off);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Find the destination instructions for all calls.
+ */
+static int add_call_destinations(struct objtool_file *file)
+{
+ struct instruction *insn;
+ unsigned long dest_off;
+ struct rela *rela;
+
+ for_each_insn(file, insn) {
+ if (insn->type != INSN_CALL)
+ continue;
+
+ rela = find_rela_by_dest_range(insn->sec, insn->offset,
+ insn->len);
+ if (!rela) {
+ dest_off = insn->offset + insn->len + insn->immediate;
+ insn->call_dest = find_symbol_by_offset(insn->sec,
+ dest_off);
+
+ if (!insn->call_dest && !insn->ignore) {
+ WARN_FUNC("unsupported intra-function call",
+ insn->sec, insn->offset);
+ if (retpoline)
+ WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+ return -1;
+ }
+
+ } else if (rela->sym->type == STT_SECTION) {
+ insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+ rela->addend+4);
+ if (!insn->call_dest ||
+ insn->call_dest->type != STT_FUNC) {
+ WARN_FUNC("can't find call dest symbol at %s+0x%x",
+ insn->sec, insn->offset,
+ rela->sym->sec->name,
+ rela->addend + 4);
+ return -1;
+ }
+ } else
+ insn->call_dest = rela->sym;
+ }
+
+ return 0;
+}
+
+/*
+ * The .alternatives section requires some extra special care, over and above
+ * what other special sections require:
+ *
+ * 1. Because alternatives are patched in-place, we need to insert a fake jump
+ * instruction at the end so that validate_branch() skips all the original
+ * replaced instructions when validating the new instruction path.
+ *
+ * 2. An added wrinkle is that the new instruction length might be zero. In
+ * that case the old instructions are replaced with noops. We simulate that
+ * by creating a fake jump as the only new instruction.
+ *
+ * 3. In some cases, the alternative section includes an instruction which
+ * conditionally jumps to the _end_ of the entry. We have to modify these
+ * jumps' destinations to point back to .text rather than the end of the
+ * entry in .altinstr_replacement.
+ *
+ * 4. It has been requested that we don't validate the !POPCNT feature path
+ * which is a "very very small percentage of machines".
+ */
+static int handle_group_alt(struct objtool_file *file,
+ struct special_alt *special_alt,
+ struct instruction *orig_insn,
+ struct instruction **new_insn)
+{
+ struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
+ unsigned long dest_off;
+
+ last_orig_insn = NULL;
+ insn = orig_insn;
+ sec_for_each_insn_from(file, insn) {
+ if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+ break;
+
+ if (special_alt->skip_orig)
+ insn->type = INSN_NOP;
+
+ insn->alt_group = true;
+ last_orig_insn = insn;
+ }
+
+ if (next_insn_same_sec(file, last_orig_insn)) {
+ fake_jump = malloc(sizeof(*fake_jump));
+ if (!fake_jump) {
+ WARN("malloc failed");
+ return -1;
+ }
+ memset(fake_jump, 0, sizeof(*fake_jump));
+ INIT_LIST_HEAD(&fake_jump->alts);
+ clear_insn_state(&fake_jump->state);
+
+ fake_jump->sec = special_alt->new_sec;
+ fake_jump->offset = -1;
+ fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+ fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+ fake_jump->ignore = true;
+ }
+
+ if (!special_alt->new_len) {
+ if (!fake_jump) {
+ WARN("%s: empty alternative at end of section",
+ special_alt->orig_sec->name);
+ return -1;
+ }
+
+ *new_insn = fake_jump;
+ return 0;
+ }
+
+ last_new_insn = NULL;
+ insn = *new_insn;
+ sec_for_each_insn_from(file, insn) {
+ if (insn->offset >= special_alt->new_off + special_alt->new_len)
+ break;
+
+ last_new_insn = insn;
+
+ insn->ignore = orig_insn->ignore_alts;
+
+ if (insn->type != INSN_JUMP_CONDITIONAL &&
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+
+ if (!insn->immediate)
+ continue;
+
+ dest_off = insn->offset + insn->len + insn->immediate;
+ if (dest_off == special_alt->new_off + special_alt->new_len) {
+ if (!fake_jump) {
+ WARN("%s: alternative jump to end of section",
+ special_alt->orig_sec->name);
+ return -1;
+ }
+ insn->jump_dest = fake_jump;
+ }
+
+ if (!insn->jump_dest) {
+ WARN_FUNC("can't find alternative jump destination",
+ insn->sec, insn->offset);
+ return -1;
+ }
+ }
+
+ if (!last_new_insn) {
+ WARN_FUNC("can't find last new alternative instruction",
+ special_alt->new_sec, special_alt->new_off);
+ return -1;
+ }
+
+ if (fake_jump)
+ list_add(&fake_jump->list, &last_new_insn->list);
+
+ return 0;
+}
+
+/*
+ * A jump table entry can either convert a nop to a jump or a jump to a nop.
+ * If the original instruction is a jump, make the alt entry an effective nop
+ * by just skipping the original instruction.
+ */
+static int handle_jump_alt(struct objtool_file *file,
+ struct special_alt *special_alt,
+ struct instruction *orig_insn,
+ struct instruction **new_insn)
+{
+ if (orig_insn->type == INSN_NOP)
+ return 0;
+
+ if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
+ WARN_FUNC("unsupported instruction at jump label",
+ orig_insn->sec, orig_insn->offset);
+ return -1;
+ }
+
+ *new_insn = list_next_entry(orig_insn, list);
+ return 0;
+}
+
+/*
+ * Read all the special sections which have alternate instructions which can be
+ * patched in or redirected to at runtime. Each instruction having alternate
+ * instruction(s) has them added to its insn->alts list, which will be
+ * traversed in validate_branch().
+ */
+static int add_special_section_alts(struct objtool_file *file)
+{
+ struct list_head special_alts;
+ struct instruction *orig_insn, *new_insn;
+ struct special_alt *special_alt, *tmp;
+ struct alternative *alt;
+ int ret;
+
+ ret = special_get_alts(file->elf, &special_alts);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
+
+ orig_insn = find_insn(file, special_alt->orig_sec,
+ special_alt->orig_off);
+ if (!orig_insn) {
+ WARN_FUNC("special: can't find orig instruction",
+ special_alt->orig_sec, special_alt->orig_off);
+ ret = -1;
+ goto out;
+ }
+
+ new_insn = NULL;
+ if (!special_alt->group || special_alt->new_len) {
+ new_insn = find_insn(file, special_alt->new_sec,
+ special_alt->new_off);
+ if (!new_insn) {
+ WARN_FUNC("special: can't find new instruction",
+ special_alt->new_sec,
+ special_alt->new_off);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (special_alt->group) {
+ ret = handle_group_alt(file, special_alt, orig_insn,
+ &new_insn);
+ if (ret)
+ goto out;
+ } else if (special_alt->jump_or_nop) {
+ ret = handle_jump_alt(file, special_alt, orig_insn,
+ &new_insn);
+ if (ret)
+ goto out;
+ }
+
+ alt = malloc(sizeof(*alt));
+ if (!alt) {
+ WARN("malloc failed");
+ ret = -1;
+ goto out;
+ }
+
+ alt->insn = new_insn;
+ list_add_tail(&alt->list, &orig_insn->alts);
+
+ list_del(&special_alt->list);
+ free(special_alt);
+ }
+
+out:
+ return ret;
+}
+
+static int add_switch_table(struct objtool_file *file, struct instruction *insn,
+ struct rela *table, struct rela *next_table)
+{
+ struct rela *rela = table;
+ struct instruction *alt_insn;
+ struct alternative *alt;
+ struct symbol *pfunc = insn->func->pfunc;
+ unsigned int prev_offset = 0;
+
+ list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+ if (rela == next_table)
+ break;
+
+ /* Make sure the switch table entries are consecutive: */
+ if (prev_offset && rela->offset != prev_offset + 8)
+ break;
+
+ /* Detect function pointers from contiguous objects: */
+ if (rela->sym->sec == pfunc->sec &&
+ rela->addend == pfunc->offset)
+ break;
+
+ alt_insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!alt_insn)
+ break;
+
+ /* Make sure the jmp dest is in the function or subfunction: */
+ if (alt_insn->func->pfunc != pfunc)
+ break;
+
+ alt = malloc(sizeof(*alt));
+ if (!alt) {
+ WARN("malloc failed");
+ return -1;
+ }
+
+ alt->insn = alt_insn;
+ list_add_tail(&alt->list, &insn->alts);
+ prev_offset = rela->offset;
+ }
+
+ if (!prev_offset) {
+ WARN_FUNC("can't find switch jump table",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * find_switch_table() - Given a dynamic jump, find the switch jump table in
+ * .rodata associated with it.
+ *
+ * There are 3 basic patterns:
+ *
+ * 1. jmpq *[rodata addr](,%reg,8)
+ *
+ * This is the most common case by far. It jumps to an address in a simple
+ * jump table which is stored in .rodata.
+ *
+ * 2. jmpq *[rodata addr](%rip)
+ *
+ * This is caused by a rare GCC quirk, currently only seen in three driver
+ * functions in the kernel, only with certain obscure non-distro configs.
+ *
+ * As part of an optimization, GCC makes a copy of an existing switch jump
+ * table, modifies it, and then hard-codes the jump (albeit with an indirect
+ * jump) to use a single entry in the table. The rest of the jump table and
+ * some of its jump targets remain as dead code.
+ *
+ * In such a case we can just crudely ignore all unreachable instruction
+ * warnings for the entire object file. Ideally we would just ignore them
+ * for the function, but that would require redesigning the code quite a
+ * bit. And honestly that's just not worth doing: unreachable instruction
+ * warnings are of questionable value anyway, and this is such a rare issue.
+ *
+ * 3. mov [rodata addr],%reg1
+ * ... some instructions ...
+ * jmpq *(%reg1,%reg2,8)
+ *
+ * This is a fairly uncommon pattern which is new for GCC 6. As of this
+ * writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
+ * As of GCC 7 there are quite a few more of these and the 'in between' code
+ * is significant. Esp. with KASAN enabled some of the code between the mov
+ * and jmpq uses .rodata itself, which can confuse things.
+ *
+ * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ * ensure the same register is used in the mov and jump instructions.
+ *
+ * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ */
+static struct rela *find_switch_table(struct objtool_file *file,
+ struct symbol *func,
+ struct instruction *insn)
+{
+ struct rela *text_rela, *rodata_rela;
+ struct instruction *orig_insn = insn;
+ unsigned long table_offset;
+
+ /*
+ * Backward search using the @first_jump_src links; these help avoid
+ * much of the 'in between' code, which could otherwise confuse us.
+ */
+ for (;
+ &insn->list != &file->insn_list &&
+ insn->sec == func->sec &&
+ insn->offset >= func->offset;
+
+ insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
+ if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
+ break;
+
+ /* allow small jumps within the range */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest &&
+ (insn->jump_dest->offset <= insn->offset ||
+ insn->jump_dest->offset > orig_insn->offset))
+ break;
+
+ /* look for a relocation which references .rodata */
+ text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+ insn->len);
+ if (!text_rela || text_rela->sym != file->rodata->sym)
+ continue;
+
+ table_offset = text_rela->addend;
+ if (text_rela->type == R_X86_64_PC32)
+ table_offset += 4;
+
+ /*
+ * Make sure the .rodata address isn't associated with a
+ * symbol. gcc jump tables are anonymous data.
+ */
+ if (find_symbol_containing(file->rodata, table_offset))
+ continue;
+
+ rodata_rela = find_rela_by_dest(file->rodata, table_offset);
+ if (rodata_rela) {
+ /*
+ * Use of RIP-relative switch jumps is quite rare, and
+ * indicates a rare GCC quirk/bug which can leave dead
+ * code behind.
+ */
+ if (text_rela->type == R_X86_64_PC32)
+ file->ignore_unreachables = true;
+
+ return rodata_rela;
+ }
+ }
+
+ return NULL;
+}
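
For illustration only (hypothetical code, not from this patch): a dense switch statement like the one below is the kind of construct GCC typically lowers to pattern 1 above, an anonymous jump table in .rodata indexed by the switch value. The assembly in the trailing comment is schematic and varies with compiler version and options.

	int classify(int c)
	{
		switch (c) {
		case 0: return 10;
		case 1: return 11;
		case 2: return 12;
		case 3: return 13;
		case 4: return 14;
		case 5: return 15;
		default: return -1;
		}
		/*
		 * Schematic codegen (pattern 1):
		 *
		 *	cmp	$0x5,%edi
		 *	ja	.Ldefault
		 *	jmpq	*.Ltable(,%rdi,8)
		 */
	}
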
+
+
+static int add_func_switch_tables(struct objtool_file *file,
+ struct symbol *func)
+{
+ struct instruction *insn, *last = NULL, *prev_jump = NULL;
+ struct rela *rela, *prev_rela = NULL;
+ int ret;
+
+ func_for_each_insn_all(file, func, insn) {
+ if (!last)
+ last = insn;
+
+ /*
+ * Store back-pointers for unconditional forward jumps such
+ * that find_switch_table() can back-track using those and
+ * avoid some potentially confusing code.
+ */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+ insn->offset > last->offset &&
+ insn->jump_dest->offset > insn->offset &&
+ !insn->jump_dest->first_jump_src) {
+
+ insn->jump_dest->first_jump_src = insn;
+ last = insn->jump_dest;
+ }
+
+ if (insn->type != INSN_JUMP_DYNAMIC)
+ continue;
+
+ rela = find_switch_table(file, func, insn);
+ if (!rela)
+ continue;
+
+ /*
+ * We found a switch table, but we don't know yet how big it
+ * is. Don't add it until we reach the end of the function or
+ * the beginning of another switch table in the same function.
+ */
+ if (prev_jump) {
+ ret = add_switch_table(file, prev_jump, prev_rela, rela);
+ if (ret)
+ return ret;
+ }
+
+ prev_jump = insn;
+ prev_rela = rela;
+ }
+
+ if (prev_jump) {
+ ret = add_switch_table(file, prev_jump, prev_rela, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * For some switch statements, gcc generates a jump table in the .rodata
+ * section which contains a list of addresses within the function to jump to.
+ * This finds these jump tables and adds them to the insn->alts lists.
+ */
+static int add_switch_table_alts(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *func;
+ int ret;
+
+ if (!file->rodata || !file->rodata->rela)
+ return 0;
+
+ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ ret = add_func_switch_tables(file, func);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int read_unwind_hints(struct objtool_file *file)
+{
+ struct section *sec, *relasec;
+ struct rela *rela;
+ struct unwind_hint *hint;
+ struct instruction *insn;
+ struct cfi_reg *cfa;
+ int i;
+
+ sec = find_section_by_name(file->elf, ".discard.unwind_hints");
+ if (!sec)
+ return 0;
+
+ relasec = sec->rela;
+ if (!relasec) {
+ WARN("missing .rela.discard.unwind_hints section");
+ return -1;
+ }
+
+ if (sec->len % sizeof(struct unwind_hint)) {
+ WARN("struct unwind_hint size mismatch");
+ return -1;
+ }
+
+ file->hints = true;
+
+ for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+ hint = (struct unwind_hint *)sec->data->d_buf + i;
+
+ rela = find_rela_by_dest(sec, i * sizeof(*hint));
+ if (!rela) {
+ WARN("can't find rela for unwind_hints[%d]", i);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("can't find insn for unwind_hints[%d]", i);
+ return -1;
+ }
+
+ cfa = &insn->state.cfa;
+
+ if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+ insn->save = true;
+ continue;
+
+ } else if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+ insn->restore = true;
+ insn->hint = true;
+ continue;
+ }
+
+ insn->hint = true;
+
+ switch (hint->sp_reg) {
+ case ORC_REG_UNDEFINED:
+ cfa->base = CFI_UNDEFINED;
+ break;
+ case ORC_REG_SP:
+ cfa->base = CFI_SP;
+ break;
+ case ORC_REG_BP:
+ cfa->base = CFI_BP;
+ break;
+ case ORC_REG_SP_INDIRECT:
+ cfa->base = CFI_SP_INDIRECT;
+ break;
+ case ORC_REG_R10:
+ cfa->base = CFI_R10;
+ break;
+ case ORC_REG_R13:
+ cfa->base = CFI_R13;
+ break;
+ case ORC_REG_DI:
+ cfa->base = CFI_DI;
+ break;
+ case ORC_REG_DX:
+ cfa->base = CFI_DX;
+ break;
+ default:
+ WARN_FUNC("unsupported unwind_hint sp base reg %d",
+ insn->sec, insn->offset, hint->sp_reg);
+ return -1;
+ }
+
+ cfa->offset = hint->sp_offset;
+ insn->state.type = hint->type;
+ }
+
+ return 0;
+}
+
+static int read_retpoline_hints(struct objtool_file *file)
+{
+ struct section *sec;
+ struct instruction *insn;
+ struct rela *rela;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("bad .discard.retpoline_safe entry");
+ return -1;
+ }
+
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC) {
+ WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ insn->retpoline_safe = true;
+ }
+
+ return 0;
+}
+
+static int decode_sections(struct objtool_file *file)
+{
+ int ret;
+
+ ret = decode_instructions(file);
+ if (ret)
+ return ret;
+
+ ret = add_dead_ends(file);
+ if (ret)
+ return ret;
+
+ add_ignores(file);
+
+ ret = add_nospec_ignores(file);
+ if (ret)
+ return ret;
+
+ ret = add_jump_destinations(file);
+ if (ret)
+ return ret;
+
+ ret = add_special_section_alts(file);
+ if (ret)
+ return ret;
+
+ ret = add_call_destinations(file);
+ if (ret)
+ return ret;
+
+ ret = add_switch_table_alts(file);
+ if (ret)
+ return ret;
+
+ ret = read_unwind_hints(file);
+ if (ret)
+ return ret;
+
+ ret = read_retpoline_hints(file);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool is_fentry_call(struct instruction *insn)
+{
+ if (insn->type == INSN_CALL &&
+ insn->call_dest->type == STT_NOTYPE &&
+ !strcmp(insn->call_dest->name, "__fentry__"))
+ return true;
+
+ return false;
+}
+
+static bool has_modified_stack_frame(struct insn_state *state)
+{
+ int i;
+
+ if (state->cfa.base != initial_func_cfi.cfa.base ||
+ state->cfa.offset != initial_func_cfi.cfa.offset ||
+ state->stack_size != initial_func_cfi.cfa.offset ||
+ state->drap)
+ return true;
+
+ for (i = 0; i < CFI_NUM_REGS; i++)
+ if (state->regs[i].base != initial_func_cfi.regs[i].base ||
+ state->regs[i].offset != initial_func_cfi.regs[i].offset)
+ return true;
+
+ return false;
+}
+
+static bool has_valid_stack_frame(struct insn_state *state)
+{
+ if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
+ state->regs[CFI_BP].offset == -16)
+ return true;
+
+ if (state->drap && state->regs[CFI_BP].base == CFI_BP)
+ return true;
+
+ return false;
+}
+
+static int update_insn_state_regs(struct instruction *insn, struct insn_state *state)
+{
+ struct cfi_reg *cfa = &state->cfa;
+ struct stack_op *op = &insn->stack_op;
+
+ if (cfa->base != CFI_SP)
+ return 0;
+
+ /* push */
+ if (op->dest.type == OP_DEST_PUSH)
+ cfa->offset += 8;
+
+ /* pop */
+ if (op->src.type == OP_SRC_POP)
+ cfa->offset -= 8;
+
+ /* add immediate to sp */
+ if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
+ op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
+ cfa->offset -= op->src.offset;
+
+ return 0;
+}
+
+static void save_reg(struct insn_state *state, unsigned char reg, int base,
+ int offset)
+{
+ if (arch_callee_saved_reg(reg) &&
+ state->regs[reg].base == CFI_UNDEFINED) {
+ state->regs[reg].base = base;
+ state->regs[reg].offset = offset;
+ }
+}
+
+static void restore_reg(struct insn_state *state, unsigned char reg)
+{
+ state->regs[reg].base = CFI_UNDEFINED;
+ state->regs[reg].offset = 0;
+}
+
+/*
+ * A note about DRAP stack alignment:
+ *
+ * GCC has the concept of a DRAP register, which is used to help keep track of
+ * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
+ * register. The typical DRAP pattern is:
+ *
+ * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
+ * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
+ * 41 ff 72 f8 pushq -0x8(%r10)
+ * 55 push %rbp
+ * 48 89 e5 mov %rsp,%rbp
+ * (more pushes)
+ * 41 52 push %r10
+ * ...
+ * 41 5a pop %r10
+ * (more pops)
+ * 5d pop %rbp
+ * 49 8d 62 f8 lea -0x8(%r10),%rsp
+ * c3 retq
+ *
+ * There are some variations in the epilogues, like:
+ *
+ * 5b pop %rbx
+ * 41 5a pop %r10
+ * 41 5c pop %r12
+ * 41 5d pop %r13
+ * 41 5e pop %r14
+ * c9 leaveq
+ * 49 8d 62 f8 lea -0x8(%r10),%rsp
+ * c3 retq
+ *
+ * and:
+ *
+ * 4c 8b 55 e8 mov -0x18(%rbp),%r10
+ * 48 8b 5d e0 mov -0x20(%rbp),%rbx
+ * 4c 8b 65 f0 mov -0x10(%rbp),%r12
+ * 4c 8b 6d f8 mov -0x8(%rbp),%r13
+ * c9 leaveq
+ * 49 8d 62 f8 lea -0x8(%r10),%rsp
+ * c3 retq
+ *
+ * Sometimes r13 is used as the DRAP register, in which case it's saved and
+ * restored beforehand:
+ *
+ * 41 55 push %r13
+ * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
+ * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
+ * ...
+ * 49 8d 65 f0 lea -0x10(%r13),%rsp
+ * 41 5d pop %r13
+ * c3 retq
+ */
+static int update_insn_state(struct instruction *insn, struct insn_state *state)
+{
+ struct stack_op *op = &insn->stack_op;
+ struct cfi_reg *cfa = &state->cfa;
+ struct cfi_reg *regs = state->regs;
+
+ /* stack operations don't make sense with an undefined CFA */
+ if (cfa->base == CFI_UNDEFINED) {
+ if (insn->func) {
+ WARN_FUNC("undefined stack state", insn->sec, insn->offset);
+ return -1;
+ }
+ return 0;
+ }
+
+ if (state->type == ORC_TYPE_REGS || state->type == ORC_TYPE_REGS_IRET)
+ return update_insn_state_regs(insn, state);
+
+ switch (op->dest.type) {
+
+ case OP_DEST_REG:
+ switch (op->src.type) {
+
+ case OP_SRC_REG:
+ if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+ cfa->base == CFI_SP &&
+ regs[CFI_BP].base == CFI_CFA &&
+ regs[CFI_BP].offset == -cfa->offset) {
+
+ /* mov %rsp, %rbp */
+ cfa->base = op->dest.reg;
+ state->bp_scratch = false;
+ }
+
+ else if (op->src.reg == CFI_SP &&
+ op->dest.reg == CFI_BP && state->drap) {
+
+ /* drap: mov %rsp, %rbp */
+ regs[CFI_BP].base = CFI_BP;
+ regs[CFI_BP].offset = -state->stack_size;
+ state->bp_scratch = false;
+ }
+
+ else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+ /*
+ * mov %rsp, %reg
+ *
+ * This is needed for the rare case where GCC
+ * does:
+ *
+ * mov %rsp, %rax
+ * ...
+ * mov %rax, %rsp
+ */
+ state->vals[op->dest.reg].base = CFI_CFA;
+ state->vals[op->dest.reg].offset = -state->stack_size;
+ }
+
+ else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
+ cfa->base == CFI_BP) {
+
+ /*
+ * mov %rbp, %rsp
+ *
+ * Restore the original stack pointer (Clang).
+ */
+ state->stack_size = -state->regs[CFI_BP].offset;
+ }
+
+ else if (op->dest.reg == cfa->base) {
+
+ /* mov %reg, %rsp */
+ if (cfa->base == CFI_SP &&
+ state->vals[op->src.reg].base == CFI_CFA) {
+
+ /*
+ * This is needed for the rare case
+ * where GCC does something dumb like:
+ *
+ * lea 0x8(%rsp), %rcx
+ * ...
+ * mov %rcx, %rsp
+ */
+ cfa->offset = -state->vals[op->src.reg].offset;
+ state->stack_size = cfa->offset;
+
+ } else {
+ cfa->base = CFI_UNDEFINED;
+ cfa->offset = 0;
+ }
+ }
+
+ break;
+
+ case OP_SRC_ADD:
+ if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
+
+ /* add imm, %rsp */
+ state->stack_size -= op->src.offset;
+ if (cfa->base == CFI_SP)
+ cfa->offset -= op->src.offset;
+ break;
+ }
+
+ if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+
+ /* lea disp(%rbp), %rsp */
+ state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ break;
+ }
+
+ if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+ /* drap: lea disp(%rsp), %drap */
+ state->drap_reg = op->dest.reg;
+
+ /*
+ * lea disp(%rsp), %reg
+ *
+ * This is needed for the rare case where GCC
+ * does something dumb like:
+ *
+ * lea 0x8(%rsp), %rcx
+ * ...
+ * mov %rcx, %rsp
+ */
+ state->vals[op->dest.reg].base = CFI_CFA;
+ state->vals[op->dest.reg].offset = \
+ -state->stack_size + op->src.offset;
+
+ break;
+ }
+
+ if (state->drap && op->dest.reg == CFI_SP &&
+ op->src.reg == state->drap_reg) {
+
+ /* drap: lea disp(%drap), %rsp */
+ cfa->base = CFI_SP;
+ cfa->offset = state->stack_size = -op->src.offset;
+ state->drap_reg = CFI_UNDEFINED;
+ state->drap = false;
+ break;
+ }
+
+ if (op->dest.reg == state->cfa.base) {
+ WARN_FUNC("unsupported stack register modification",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ break;
+
+ case OP_SRC_AND:
+ if (op->dest.reg != CFI_SP ||
+ (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
+ (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
+ WARN_FUNC("unsupported stack pointer realignment",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ if (state->drap_reg != CFI_UNDEFINED) {
+ /* drap: and imm, %rsp */
+ cfa->base = state->drap_reg;
+ cfa->offset = state->stack_size = 0;
+ state->drap = true;
+ }
+
+ /*
+ * Older versions of GCC (4.8ish) realign the stack
+ * without DRAP, with a frame pointer.
+ */
+
+ break;
+
+ case OP_SRC_POP:
+ if (!state->drap && op->dest.type == OP_DEST_REG &&
+ op->dest.reg == cfa->base) {
+
+ /* pop %rbp */
+ cfa->base = CFI_SP;
+ }
+
+ if (state->drap && cfa->base == CFI_BP_INDIRECT &&
+ op->dest.type == OP_DEST_REG &&
+ op->dest.reg == state->drap_reg &&
+ state->drap_offset == -state->stack_size) {
+
+ /* drap: pop %drap */
+ cfa->base = state->drap_reg;
+ cfa->offset = 0;
+ state->drap_offset = -1;
+
+ } else if (regs[op->dest.reg].offset == -state->stack_size) {
+
+ /* pop %reg */
+ restore_reg(state, op->dest.reg);
+ }
+
+ state->stack_size -= 8;
+ if (cfa->base == CFI_SP)
+ cfa->offset -= 8;
+
+ break;
+
+ case OP_SRC_REG_INDIRECT:
+ if (state->drap && op->src.reg == CFI_BP &&
+ op->src.offset == state->drap_offset) {
+
+ /* drap: mov disp(%rbp), %drap */
+ cfa->base = state->drap_reg;
+ cfa->offset = 0;
+ state->drap_offset = -1;
+ }
+
+ if (state->drap && op->src.reg == CFI_BP &&
+ op->src.offset == regs[op->dest.reg].offset) {
+
+ /* drap: mov disp(%rbp), %reg */
+ restore_reg(state, op->dest.reg);
+
+ } else if (op->src.reg == cfa->base &&
+ op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
+
+ /* mov disp(%rbp), %reg */
+ /* mov disp(%rsp), %reg */
+ restore_reg(state, op->dest.reg);
+ }
+
+ break;
+
+ default:
+ WARN_FUNC("unknown stack-related instruction",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ break;
+
+ case OP_DEST_PUSH:
+ state->stack_size += 8;
+ if (cfa->base == CFI_SP)
+ cfa->offset += 8;
+
+ if (op->src.type != OP_SRC_REG)
+ break;
+
+ if (state->drap) {
+ if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+ /* drap: push %drap */
+ cfa->base = CFI_BP_INDIRECT;
+ cfa->offset = -state->stack_size;
+
+ /* save drap so we know when to restore it */
+ state->drap_offset = -state->stack_size;
+
+ } else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
+
+ /* drap: push %rbp */
+ state->stack_size = 0;
+
+ } else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+ /* drap: push %reg */
+ save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
+ }
+
+ } else {
+
+ /* push %reg */
+ save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+ }
+
+ /* detect when asm code uses rbp as a scratch register */
+ if (!no_fp && insn->func && op->src.reg == CFI_BP &&
+ cfa->base != CFI_BP)
+ state->bp_scratch = true;
+ break;
+
+ case OP_DEST_REG_INDIRECT:
+
+ if (state->drap) {
+ if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+ /* drap: mov %drap, disp(%rbp) */
+ cfa->base = CFI_BP_INDIRECT;
+ cfa->offset = op->dest.offset;
+
+ /* save drap offset so we know when to restore it */
+ state->drap_offset = op->dest.offset;
+ }
+
+ else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+ /* drap: mov reg, disp(%rbp) */
+ save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
+ }
+
+ } else if (op->dest.reg == cfa->base) {
+
+ /* mov reg, disp(%rbp) */
+ /* mov reg, disp(%rsp) */
+ save_reg(state, op->src.reg, CFI_CFA,
+ op->dest.offset - state->cfa.offset);
+ }
+
+ break;
+
+ case OP_DEST_LEAVE:
+ if ((!state->drap && cfa->base != CFI_BP) ||
+ (state->drap && cfa->base != state->drap_reg)) {
+ WARN_FUNC("leave instruction with modified stack frame",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ /* leave (mov %rbp, %rsp; pop %rbp) */
+
+ state->stack_size = -state->regs[CFI_BP].offset - 8;
+ restore_reg(state, CFI_BP);
+
+ if (!state->drap) {
+ cfa->base = CFI_SP;
+ cfa->offset -= 8;
+ }
+
+ break;
+
+ case OP_DEST_MEM:
+ if (op->src.type != OP_SRC_POP) {
+ WARN_FUNC("unknown stack-related memory operation",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ /* pop mem */
+ state->stack_size -= 8;
+ if (cfa->base == CFI_SP)
+ cfa->offset -= 8;
+
+ break;
+
+ default:
+ WARN_FUNC("unknown stack-related instruction",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
+ return 0;
+}
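
For illustration only (hypothetical code, not from this patch): the usual trigger for the DRAP pattern documented above update_insn_state() is a local object whose alignment exceeds the ABI stack alignment, in a function that also takes stack-passed arguments; with many GCC versions this makes the compiler realign %rsp and stash the incoming stack pointer in %r10 (or %r13).

	long drap_example(long a, long b, long c, long d, long e, long f, long g)
	{
		struct { long v[8]; } buf __attribute__((aligned(64)));

		buf.v[0] = g;		/* the 7th argument arrives on the stack */

		/* Keep buf alive so the compiler can't optimize it away. */
		asm volatile("" : : "r" (&buf) : "memory");

		return buf.v[0] + a + b + c + d + e + f;
	}
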
+
+static bool insn_state_match(struct instruction *insn, struct insn_state *state)
+{
+ struct insn_state *state1 = &insn->state, *state2 = state;
+ int i;
+
+ if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
+ WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
+ insn->sec, insn->offset,
+ state1->cfa.base, state1->cfa.offset,
+ state2->cfa.base, state2->cfa.offset);
+
+ } else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ if (!memcmp(&state1->regs[i], &state2->regs[i],
+ sizeof(struct cfi_reg)))
+ continue;
+
+ WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
+ insn->sec, insn->offset,
+ i, state1->regs[i].base, state1->regs[i].offset,
+ i, state2->regs[i].base, state2->regs[i].offset);
+ break;
+ }
+
+ } else if (state1->type != state2->type) {
+ WARN_FUNC("stack state mismatch: type1=%d type2=%d",
+ insn->sec, insn->offset, state1->type, state2->type);
+
+ } else if (state1->drap != state2->drap ||
+ (state1->drap && state1->drap_reg != state2->drap_reg) ||
+ (state1->drap && state1->drap_offset != state2->drap_offset)) {
+ WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
+ insn->sec, insn->offset,
+ state1->drap, state1->drap_reg, state1->drap_offset,
+ state2->drap, state2->drap_reg, state2->drap_offset);
+
+ } else
+ return true;
+
+ return false;
+}
+
+/*
+ * Follow the branch starting at the given instruction, and recursively follow
+ * any other branches (jumps). Meanwhile, track the frame pointer state at
+ * each instruction and validate all the rules described in
+ * tools/objtool/Documentation/stack-validation.txt.
+ */
+static int validate_branch(struct objtool_file *file, struct instruction *first,
+ struct insn_state state)
+{
+ struct alternative *alt;
+ struct instruction *insn, *next_insn;
+ struct section *sec;
+ struct symbol *func = NULL;
+ int ret;
+
+ insn = first;
+ sec = insn->sec;
+
+ if (insn->alt_group && list_empty(&insn->alts)) {
+ WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+ sec, insn->offset);
+ return 1;
+ }
+
+ while (1) {
+ next_insn = next_insn_same_sec(file, insn);
+
+ if (file->c_file && func && insn->func && func != insn->func->pfunc) {
+ WARN("%s() falls through to next function %s()",
+ func->name, insn->func->name);
+ return 1;
+ }
+
+ func = insn->func ? insn->func->pfunc : NULL;
+
+ if (func && insn->ignore) {
+ WARN_FUNC("BUG: why am I validating an ignored function?",
+ sec, insn->offset);
+ return 1;
+ }
+
+ if (insn->visited) {
+ if (!insn->hint && !insn_state_match(insn, &state))
+ return 1;
+
+ return 0;
+ }
+
+ if (insn->hint) {
+ if (insn->restore) {
+ struct instruction *save_insn, *i;
+
+ i = insn;
+ save_insn = NULL;
+ func_for_each_insn_continue_reverse(file, insn->func, i) {
+ if (i->save) {
+ save_insn = i;
+ break;
+ }
+ }
+
+ if (!save_insn) {
+ WARN_FUNC("no corresponding CFI save for CFI restore",
+ sec, insn->offset);
+ return 1;
+ }
+
+ if (!save_insn->visited) {
+ /*
+ * Oops, no state to copy yet.
+ * Hopefully we can reach this
+ * instruction from another branch
+ * after the save insn has been
+ * visited.
+ */
+ if (insn == first)
+ return 0;
+
+ WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+ sec, insn->offset);
+ return 1;
+ }
+
+ insn->state = save_insn->state;
+ }
+
+ state = insn->state;
+
+ } else
+ insn->state = state;
+
+ insn->visited = true;
+
+ if (!insn->ignore_alts) {
+ list_for_each_entry(alt, &insn->alts, list) {
+ ret = validate_branch(file, alt->insn, state);
+ if (ret)
+ return 1;
+ }
+ }
+
+ switch (insn->type) {
+
+ case INSN_RETURN:
+ if (func && has_modified_stack_frame(&state)) {
+ WARN_FUNC("return with modified stack frame",
+ sec, insn->offset);
+ return 1;
+ }
+
+ if (state.bp_scratch) {
+ WARN("%s uses BP as a scratch register",
+ insn->func->name);
+ return 1;
+ }
+
+ return 0;
+
+ case INSN_CALL:
+ if (is_fentry_call(insn))
+ break;
+
+ ret = dead_end_function(file, insn->call_dest);
+ if (ret == 1)
+ return 0;
+ if (ret == -1)
+ return 1;
+
+ /* fallthrough */
+ case INSN_CALL_DYNAMIC:
+ if (!no_fp && func && !has_valid_stack_frame(&state)) {
+ WARN_FUNC("call without frame pointer save/setup",
+ sec, insn->offset);
+ return 1;
+ }
+ break;
+
+ case INSN_JUMP_CONDITIONAL:
+ case INSN_JUMP_UNCONDITIONAL:
+ if (insn->jump_dest &&
+ (!func || !insn->jump_dest->func ||
+ insn->jump_dest->func->pfunc == func)) {
+ ret = validate_branch(file, insn->jump_dest,
+ state);
+ if (ret)
+ return 1;
+
+ } else if (func && has_modified_stack_frame(&state)) {
+ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ sec, insn->offset);
+ return 1;
+ }
+
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
+ return 0;
+
+ break;
+
+ case INSN_JUMP_DYNAMIC:
+ if (func && list_empty(&insn->alts) &&
+ has_modified_stack_frame(&state)) {
+ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ sec, insn->offset);
+ return 1;
+ }
+
+ return 0;
+
+ case INSN_CONTEXT_SWITCH:
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_FUNC("unsupported instruction in callable function",
+ sec, insn->offset);
+ return 1;
+ }
+ return 0;
+
+ case INSN_STACK:
+ if (update_insn_state(insn, &state))
+ return 1;
+
+ break;
+
+ default:
+ break;
+ }
+
+ if (insn->dead_end)
+ return 0;
+
+ if (!next_insn) {
+ if (state.cfa.base == CFI_UNDEFINED)
+ return 0;
+ WARN("%s: unexpected end of section", sec->name);
+ return 1;
+ }
+
+ insn = next_insn;
+ }
+
+ return 0;
+}
+
+static int validate_unwind_hints(struct objtool_file *file)
+{
+ struct instruction *insn;
+ int ret, warnings = 0;
+ struct insn_state state;
+
+ if (!file->hints)
+ return 0;
+
+ clear_insn_state(&state);
+
+ for_each_insn(file, insn) {
+ if (insn->hint && !insn->visited) {
+ ret = validate_branch(file, insn, state);
+ warnings += ret;
+ }
+ }
+
+ return warnings;
+}
+
+static int validate_retpoline(struct objtool_file *file)
+{
+ struct instruction *insn;
+ int warnings = 0;
+
+ for_each_insn(file, insn) {
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC)
+ continue;
+
+ if (insn->retpoline_safe)
+ continue;
+
+ /*
+ * .init.text code is run before userspace and thus doesn't
+ * strictly need retpolines, except for modules which are
+ * loaded late; they very much do need retpolines in their
+ * .init.text.
+ */
+ if (!strcmp(insn->sec->name, ".init.text") && !module)
+ continue;
+
+ WARN_FUNC("indirect %s found in RETPOLINE build",
+ insn->sec, insn->offset,
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+ warnings++;
+ }
+
+ return warnings;
+}
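
For illustration only (hypothetical code, not from this patch): an ordinary indirect call like the one below is what validate_retpoline() warns about in a retpoline build, unless the compiler turned it into a thunk call or the site carries a retpoline-safe annotation.

	static int (*handler)(int);

	int dispatch(int v)
	{
		return handler(v);	/* indirect call through a function pointer */
	}
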
+
+static bool is_kasan_insn(struct instruction *insn)
+{
+ return (insn->type == INSN_CALL &&
+ !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+}
+
+static bool is_ubsan_insn(struct instruction *insn)
+{
+ return (insn->type == INSN_CALL &&
+ !strcmp(insn->call_dest->name,
+ "__ubsan_handle_builtin_unreachable"));
+}
+
+static bool ignore_unreachable_insn(struct instruction *insn)
+{
+ int i;
+
+ if (insn->ignore || insn->type == INSN_NOP)
+ return true;
+
+ /*
+ * Ignore any unused exceptions. This can happen when a whitelisted
+ * function has an exception table entry.
+ *
+ * Also ignore alternative replacement instructions. This can happen
+ * when a whitelisted function uses one of the ALTERNATIVE macros.
+ */
+ if (!strcmp(insn->sec->name, ".fixup") ||
+ !strcmp(insn->sec->name, ".altinstr_replacement") ||
+ !strcmp(insn->sec->name, ".altinstr_aux"))
+ return true;
+
+ /*
+ * Check if this (or a subsequent) instruction is related to
+ * CONFIG_UBSAN or CONFIG_KASAN.
+ *
+ * End the search at 5 instructions to avoid going into the weeds.
+ */
+ if (!insn->func)
+ return false;
+ for (i = 0; i < 5; i++) {
+
+ if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+ return true;
+
+ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+ if (insn->jump_dest &&
+ insn->jump_dest->func == insn->func) {
+ insn = insn->jump_dest;
+ continue;
+ }
+
+ break;
+ }
+
+ if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+ break;
+
+ insn = list_next_entry(insn, list);
+ }
+
+ return false;
+}
+
+static int validate_functions(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *func;
+ struct instruction *insn;
+ struct insn_state state;
+ int ret, warnings = 0;
+
+ clear_insn_state(&state);
+
+ state.cfa = initial_func_cfi.cfa;
+ memcpy(&state.regs, &initial_func_cfi.regs,
+ CFI_NUM_REGS * sizeof(struct cfi_reg));
+ state.stack_size = initial_func_cfi.cfa.offset;
+
+ for_each_sec(file, sec) {
+ list_for_each_entry(func, &sec->symbol_list, list) {
+ if (func->type != STT_FUNC || func->pfunc != func)
+ continue;
+
+ insn = find_insn(file, sec, func->offset);
+ if (!insn || insn->ignore)
+ continue;
+
+ ret = validate_branch(file, insn, state);
+ warnings += ret;
+ }
+ }
+
+ return warnings;
+}
+
+static int validate_reachable_instructions(struct objtool_file *file)
+{
+ struct instruction *insn;
+
+ if (file->ignore_unreachables)
+ return 0;
+
+ for_each_insn(file, insn) {
+ if (insn->visited || ignore_unreachable_insn(insn))
+ continue;
+
+ WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void cleanup(struct objtool_file *file)
+{
+ struct instruction *insn, *tmpinsn;
+ struct alternative *alt, *tmpalt;
+
+ list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
+ list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
+ list_del(&alt->list);
+ free(alt);
+ }
+ list_del(&insn->list);
+ hash_del(&insn->hash);
+ free(insn);
+ }
+ elf_close(file->elf);
+}
+
+int check(const char *_objname, bool orc)
+{
+ struct objtool_file file;
+ int ret, warnings = 0;
+
+ objname = _objname;
+
+ file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
+ if (!file.elf)
+ return 1;
+
+ INIT_LIST_HEAD(&file.insn_list);
+ hash_init(file.insn_hash);
+ file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+ file.rodata = find_section_by_name(file.elf, ".rodata");
+ file.c_file = find_section_by_name(file.elf, ".comment");
+ file.ignore_unreachables = no_unreachable;
+ file.hints = false;
+
+ arch_initial_func_cfi_state(&initial_func_cfi);
+
+ ret = decode_sections(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
+ if (list_empty(&file.insn_list))
+ goto out;
+
+ if (retpoline) {
+ ret = validate_retpoline(&file);
+ if (ret < 0)
+ return ret;
+ warnings += ret;
+ }
+
+ ret = validate_functions(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
+ ret = validate_unwind_hints(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
+ if (!warnings) {
+ ret = validate_reachable_instructions(&file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+ if (orc) {
+ ret = create_orc(&file);
+ if (ret < 0)
+ goto out;
+
+ ret = create_orc_sections(&file);
+ if (ret < 0)
+ goto out;
+
+ ret = elf_write(file.elf);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ cleanup(&file);
+
+ /* ignore warnings for now until we get all the code cleaned up */
+ if (ret || warnings)
+ return 0;
+ return 0;
+}
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
new file mode 100644
index 000000000000..c6b68fcb926f
--- /dev/null
+++ b/tools/objtool/check.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CHECK_H
+#define _CHECK_H
+
+#include <stdbool.h>
+#include "elf.h"
+#include "cfi.h"
+#include "arch.h"
+#include "orc.h"
+#include <linux/hashtable.h>
+
+struct insn_state {
+ struct cfi_reg cfa;
+ struct cfi_reg regs[CFI_NUM_REGS];
+ int stack_size;
+ unsigned char type;
+ bool bp_scratch;
+ bool drap;
+ int drap_reg, drap_offset;
+ struct cfi_reg vals[CFI_NUM_REGS];
+};
+
+struct instruction {
+ struct list_head list;
+ struct hlist_node hash;
+ struct section *sec;
+ unsigned long offset;
+ unsigned int len;
+ unsigned char type;
+ unsigned long immediate;
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+ bool retpoline_safe;
+ struct symbol *call_dest;
+ struct instruction *jump_dest;
+ struct instruction *first_jump_src;
+ struct list_head alts;
+ struct symbol *func;
+ struct stack_op stack_op;
+ struct insn_state state;
+ struct orc_entry orc;
+};
+
+struct objtool_file {
+ struct elf *elf;
+ struct list_head insn_list;
+ DECLARE_HASHTABLE(insn_hash, 16);
+ struct section *rodata, *whitelist;
+ bool ignore_unreachables, c_file, hints;
+};
+
+int check(const char *objname, bool orc);
+
+struct instruction *find_insn(struct objtool_file *file,
+ struct section *sec, unsigned long offset);
+
+#define for_each_insn(file, insn) \
+ list_for_each_entry(insn, &file->insn_list, list)
+
+#define sec_for_each_insn(file, sec, insn) \
+ for (insn = find_insn(file, sec, 0); \
+ insn && &insn->list != &file->insn_list && \
+ insn->sec == sec; \
+ insn = list_next_entry(insn, list))
+
+
+#endif /* _CHECK_H */
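
For illustration only (hypothetical helper, not part of this patch): the iteration macros above are intended to be used along these lines.

	#include "check.h"

	/* Count the decoded instructions that belong to one section. */
	static int count_sec_insns(struct objtool_file *file, struct section *sec)
	{
		struct instruction *insn;
		int count = 0;

		sec_for_each_insn(file, sec, insn)
			count++;

		return count;
	}
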
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index faacf0c89976..4e60e105583e 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,13 +31,6 @@
#include "elf.h"
#include "warn.h"

-/*
- * Fallback for systems without this "read, mmaping if possible" cmd.
- */
-#ifndef ELF_C_READ_MMAP
-#define ELF_C_READ_MMAP ELF_C_READ
-#endif
-
struct section *find_section_by_name(struct elf *elf, const char *name)
{
struct section *sec;
@@ -86,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
return NULL;
}

+struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
+{
+ struct section *sec;
+ struct symbol *sym;
+
+ list_for_each_entry(sec, &elf->sections, list)
+ list_for_each_entry(sym, &sec->symbol_list, list)
+ if (!strcmp(sym->name, name))
+ return sym;
+
+ return NULL;
+}
+
struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
{
struct symbol *sym;
@@ -140,12 +146,12 @@ static int read_sections(struct elf *elf)
int i;

if (elf_getshdrnum(elf->elf, &sections_nr)) {
- perror("elf_getshdrnum");
+ WARN_ELF("elf_getshdrnum");
return -1;
}

if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
- perror("elf_getshdrstrndx");
+ WARN_ELF("elf_getshdrstrndx");
return -1;
}

@@ -166,37 +172,37 @@ static int read_sections(struct elf *elf)

s = elf_getscn(elf->elf, i);
if (!s) {
- perror("elf_getscn");
+ WARN_ELF("elf_getscn");
return -1;
}

sec->idx = elf_ndxscn(s);

if (!gelf_getshdr(s, &sec->sh)) {
- perror("gelf_getshdr");
+ WARN_ELF("gelf_getshdr");
return -1;
}

sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
if (!sec->name) {
- perror("elf_strptr");
+ WARN_ELF("elf_strptr");
return -1;
}

- sec->elf_data = elf_getdata(s, NULL);
- if (!sec->elf_data) {
- perror("elf_getdata");
- return -1;
- }
-
- if (sec->elf_data->d_off != 0 ||
- sec->elf_data->d_size != sec->sh.sh_size) {
- WARN("unexpected data attributes for %s", sec->name);
- return -1;
+ if (sec->sh.sh_size != 0) {
+ sec->data = elf_getdata(s, NULL);
+ if (!sec->data) {
+ WARN_ELF("elf_getdata");
+ return -1;
+ }
+ if (sec->data->d_off != 0 ||
+ sec->data->d_size != sec->sh.sh_size) {
+ WARN("unexpected data attributes for %s",
+ sec->name);
+ return -1;
+ }
}
-
- sec->data = (unsigned long)sec->elf_data->d_buf;
- sec->len = sec->elf_data->d_size;
+ sec->len = sec->sh.sh_size;
}

/* sanity check, one more call to elf_nextscn() should return NULL */
@@ -210,10 +216,11 @@ static int read_sections(struct elf *elf)

static int read_symbols(struct elf *elf)
{
- struct section *symtab;
- struct symbol *sym;
+ struct section *symtab, *sec;
+ struct symbol *sym, *pfunc;
struct list_head *entry, *tmp;
int symbols_nr, i;
+ char *coldstr;

symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
@@ -233,15 +240,15 @@ static int read_symbols(struct elf *elf)

sym->idx = i;

- if (!gelf_getsym(symtab->elf_data, i, &sym->sym)) {
- perror("gelf_getsym");
+ if (!gelf_getsym(symtab->data, i, &sym->sym)) {
+ WARN_ELF("gelf_getsym");
goto err;
}

sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
sym->sym.st_name);
if (!sym->name) {
- perror("elf_strptr");
+ WARN_ELF("elf_strptr");
goto err;
}

@@ -288,6 +295,30 @@ static int read_symbols(struct elf *elf)
hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
}

+ /* Create parent/child links for any cold subfunctions */
+ list_for_each_entry(sec, &elf->sections, list) {
+ list_for_each_entry(sym, &sec->symbol_list, list) {
+ if (sym->type != STT_FUNC)
+ continue;
+ sym->pfunc = sym->cfunc = sym;
+ coldstr = strstr(sym->name, ".cold.");
+ if (coldstr) {
+ coldstr[0] = '\0';
+ pfunc = find_symbol_by_name(elf, sym->name);
+ coldstr[0] = '.';
+
+ if (!pfunc) {
+ WARN("%s(): can't find parent function",
+ sym->name);
+ goto err;
+ }
+
+ sym->pfunc = pfunc;
+ pfunc->cfunc = sym;
+ }
+ }
+ }
+
return 0;

err:
@@ -323,8 +354,8 @@ static int read_relas(struct elf *elf)
}
memset(rela, 0, sizeof(*rela));

- if (!gelf_getrela(sec->elf_data, i, &rela->rela)) {
- perror("gelf_getrela");
+ if (!gelf_getrela(sec->data, i, &rela->rela)) {
+ WARN_ELF("gelf_getrela");
return -1;
}

@@ -348,9 +379,10 @@ static int read_relas(struct elf *elf)
return 0;
}

-struct elf *elf_open(const char *name)
+struct elf *elf_open(const char *name, int flags)
{
struct elf *elf;
+ Elf_Cmd cmd;

elf_version(EV_CURRENT);

@@ -363,27 +395,28 @@ struct elf *elf_open(const char *name)

INIT_LIST_HEAD(&elf->sections);

- elf->name = strdup(name);
- if (!elf->name) {
- perror("strdup");
- goto err;
- }
-
- elf->fd = open(name, O_RDONLY);
+ elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
name, strerror(errno));
goto err;
}

- elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
+ if ((flags & O_ACCMODE) == O_RDONLY)
+ cmd = ELF_C_READ_MMAP;
+ else if ((flags & O_ACCMODE) == O_RDWR)
+ cmd = ELF_C_RDWR;
+ else /* O_WRONLY */
+ cmd = ELF_C_WRITE;
+
+ elf->elf = elf_begin(elf->fd, cmd, NULL);
if (!elf->elf) {
- perror("elf_begin");
+ WARN_ELF("elf_begin");
goto err;
}

if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
- perror("gelf_getehdr");
+ WARN_ELF("gelf_getehdr");
goto err;
}

@@ -403,12 +436,212 @@ struct elf *elf_open(const char *name)
return NULL;
}

+struct section *elf_create_section(struct elf *elf, const char *name,
+ size_t entsize, int nr)
+{
+ struct section *sec, *shstrtab;
+ size_t size = entsize * nr;
+ struct Elf_Scn *s;
+ Elf_Data *data;
+
+ sec = malloc(sizeof(*sec));
+ if (!sec) {
+ perror("malloc");
+ return NULL;
+ }
+ memset(sec, 0, sizeof(*sec));
+
+ INIT_LIST_HEAD(&sec->symbol_list);
+ INIT_LIST_HEAD(&sec->rela_list);
+ hash_init(sec->rela_hash);
+ hash_init(sec->symbol_hash);
+
+ list_add_tail(&sec->list, &elf->sections);
+
+ s = elf_newscn(elf->elf);
+ if (!s) {
+ WARN_ELF("elf_newscn");
+ return NULL;
+ }
+
+ sec->name = strdup(name);
+ if (!sec->name) {
+ perror("strdup");
+ return NULL;
+ }
+
+ sec->idx = elf_ndxscn(s);
+ sec->len = size;
+ sec->changed = true;
+
+ sec->data = elf_newdata(s);
+ if (!sec->data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ sec->data->d_size = size;
+ sec->data->d_align = 1;
+
+ if (size) {
+ sec->data->d_buf = malloc(size);
+ if (!sec->data->d_buf) {
+ perror("malloc");
+ return NULL;
+ }
+ memset(sec->data->d_buf, 0, size);
+ }
+
+ if (!gelf_getshdr(s, &sec->sh)) {
+ WARN_ELF("gelf_getshdr");
+ return NULL;
+ }
+
+ sec->sh.sh_size = size;
+ sec->sh.sh_entsize = entsize;
+ sec->sh.sh_type = SHT_PROGBITS;
+ sec->sh.sh_addralign = 1;
+ sec->sh.sh_flags = SHF_ALLOC;
+
+
+ /* Add section name to .shstrtab */
+ shstrtab = find_section_by_name(elf, ".shstrtab");
+ if (!shstrtab) {
+ WARN("can't find .shstrtab section");
+ return NULL;
+ }
+
+ s = elf_getscn(elf->elf, shstrtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return NULL;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ data->d_buf = sec->name;
+ data->d_size = strlen(name) + 1;
+ data->d_align = 1;
+
+ sec->sh.sh_name = shstrtab->len;
+
+ shstrtab->len += strlen(name) + 1;
+ shstrtab->changed = true;
+
+ return sec;
+}
+
+struct section *elf_create_rela_section(struct elf *elf, struct section *base)
+{
+ char *relaname;
+ struct section *sec;
+
+ relaname = malloc(strlen(base->name) + strlen(".rela") + 1);
+ if (!relaname) {
+ perror("malloc");
+ return NULL;
+ }
+ strcpy(relaname, ".rela");
+ strcat(relaname, base->name);
+
+ sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
+ free(relaname);
+ if (!sec)
+ return NULL;
+
+ base->rela = sec;
+ sec->base = base;
+
+ sec->sh.sh_type = SHT_RELA;
+ sec->sh.sh_addralign = 8;
+ sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
+ sec->sh.sh_info = base->idx;
+ sec->sh.sh_flags = SHF_INFO_LINK;
+
+ return sec;
+}
+
+int elf_rebuild_rela_section(struct section *sec)
+{
+ struct rela *rela;
+ int nr, idx = 0, size;
+ GElf_Rela *relas;
+
+ nr = 0;
+ list_for_each_entry(rela, &sec->rela_list, list)
+ nr++;
+
+ size = nr * sizeof(*relas);
+ relas = malloc(size);
+ if (!relas) {
+ perror("malloc");
+ return -1;
+ }
+
+ sec->data->d_buf = relas;
+ sec->data->d_size = size;
+
+ sec->sh.sh_size = size;
+
+ idx = 0;
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ relas[idx].r_offset = rela->offset;
+ relas[idx].r_addend = rela->addend;
+ relas[idx].r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+ idx++;
+ }
+
+ return 0;
+}
+
+int elf_write(struct elf *elf)
+{
+ struct section *sec;
+ Elf_Scn *s;
+
+ /* Update section headers for changed sections: */
+ list_for_each_entry(sec, &elf->sections, list) {
+ if (sec->changed) {
+ s = elf_getscn(elf->elf, sec->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+ if (!gelf_update_shdr(s, &sec->sh)) {
+ WARN_ELF("gelf_update_shdr");
+ return -1;
+ }
+ }
+ }
+
+ /* Make sure the new section header entries get updated properly. */
+ elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY);
+
+ /* Write all changes to the file. */
+ if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
+ WARN_ELF("elf_update");
+ return -1;
+ }
+
+ return 0;
+}
+
void elf_close(struct elf *elf)
{
struct section *sec, *tmpsec;
struct symbol *sym, *tmpsym;
struct rela *rela, *tmprela;

+ if (elf->elf)
+ elf_end(elf->elf);
+
+ if (elf->fd > 0)
+ close(elf->fd);
+
list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
list_del(&sym->list);
@@ -423,11 +656,6 @@ void elf_close(struct elf *elf)
list_del(&sec->list);
free(sec);
}
- if (elf->name)
- free(elf->name);
- if (elf->fd > 0)
- close(elf->fd);
- if (elf->elf)
- elf_end(elf->elf);
+
free(elf);
}
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 731973e1a3f5..de5cd2ddded9 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -28,6 +28,13 @@
# define elf_getshdrstrndx elf_getshstrndx
#endif

+/*
+ * Fallback for systems without this "read, mmaping if possible" cmd.
+ */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
struct section {
struct list_head list;
GElf_Shdr sh;
@@ -37,11 +44,11 @@ struct section {
DECLARE_HASHTABLE(rela_hash, 16);
struct section *base, *rela;
struct symbol *sym;
- Elf_Data *elf_data;
+ Elf_Data *data;
char *name;
int idx;
- unsigned long data;
unsigned int len;
+ bool changed, text;
};

struct symbol {
@@ -54,6 +61,7 @@ struct symbol {
unsigned char bind, type;
unsigned long offset;
unsigned int len;
+ struct symbol *pfunc, *cfunc;
};

struct rela {
@@ -76,16 +84,23 @@ struct elf {
};


-struct elf *elf_open(const char *name);
+struct elf *elf_open(const char *name, int flags);
struct section *find_section_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
unsigned int len);
struct symbol *find_containing_func(struct section *sec, unsigned long offset);
+struct section *elf_create_section(struct elf *elf, const char *name, size_t
+ entsize, int nr);
+struct section *elf_create_rela_section(struct elf *elf, struct section *base);
+int elf_rebuild_rela_section(struct section *sec);
+int elf_write(struct elf *elf);
void elf_close(struct elf *elf);

-
+#define for_each_sec(file, sec) \
+ list_for_each_entry(sec, &file->elf->sections, list)

#endif /* _OBJTOOL_ELF_H */
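
For illustration only (hypothetical helper, not part of this patch): with elf_open() now taking open(2) flags and elf_write() flushing changed section headers, a writer looks roughly like this.

	#include <fcntl.h>
	#include "elf.h"

	static int rewrite_object(const char *path)
	{
		struct elf *elf;
		struct section *sec;

		elf = elf_open(path, O_RDWR);
		if (!elf)
			return -1;

		sec = find_section_by_name(elf, ".rodata");
		if (sec)
			sec->changed = true;	/* have elf_write() refresh this header */

		if (elf_write(elf)) {
			elf_close(elf);
			return -1;
		}

		elf_close(elf);
		return 0;
	}
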
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 46c326db4f46..07f329919828 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -31,11 +31,10 @@
#include <stdlib.h>
#include <subcmd/exec-cmd.h>
#include <subcmd/pager.h>
+#include <linux/kernel.h>

#include "builtin.h"

-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
struct cmd_struct {
const char *name;
int (*fn)(int, const char **);
@@ -43,10 +42,11 @@ struct cmd_struct {
};

static const char objtool_usage_string[] =
- "objtool [OPTIONS] COMMAND [ARGS]";
+ "objtool COMMAND [ARGS]";

static struct cmd_struct objtool_cmds[] = {
{"check", cmd_check, "Perform stack metadata validation on an object file" },
+ {"orc", cmd_orc, "Generate in-place ORC unwind tables for an object file" },
};

bool help;
@@ -70,7 +70,7 @@ static void cmd_usage(void)

printf("\n");

- exit(1);
+ exit(129);
}

static void handle_options(int *argc, const char ***argv)
@@ -86,9 +86,7 @@ static void handle_options(int *argc, const char ***argv)
break;
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
- fprintf(stderr, "\n Usage: %s\n",
- objtool_usage_string);
- exit(1);
+ cmd_usage();
}

(*argv)++;
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
new file mode 100644
index 000000000000..b0e92a6d0903
--- /dev/null
+++ b/tools/objtool/orc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_H
+#define _ORC_H
+
+#include <asm/orc_types.h>
+
+struct objtool_file;
+
+int create_orc(struct objtool_file *file);
+int create_orc_sections(struct objtool_file *file);
+
+int orc_dump(const char *objname);
+
+#endif /* _ORC_H */
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
new file mode 100644
index 000000000000..c3343820916a
--- /dev/null
+++ b/tools/objtool/orc_dump.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <unistd.h>
+#include "orc.h"
+#include "warn.h"
+
+static const char *reg_name(unsigned int reg)
+{
+ switch (reg) {
+ case ORC_REG_PREV_SP:
+ return "prevsp";
+ case ORC_REG_DX:
+ return "dx";
+ case ORC_REG_DI:
+ return "di";
+ case ORC_REG_BP:
+ return "bp";
+ case ORC_REG_SP:
+ return "sp";
+ case ORC_REG_R10:
+ return "r10";
+ case ORC_REG_R13:
+ return "r13";
+ case ORC_REG_BP_INDIRECT:
+ return "bp(ind)";
+ case ORC_REG_SP_INDIRECT:
+ return "sp(ind)";
+ default:
+ return "?";
+ }
+}
+
+static const char *orc_type_name(unsigned int type)
+{
+ switch (type) {
+ case ORC_TYPE_CALL:
+ return "call";
+ case ORC_TYPE_REGS:
+ return "regs";
+ case ORC_TYPE_REGS_IRET:
+ return "iret";
+ default:
+ return "?";
+ }
+}
+
+static void print_reg(unsigned int reg, int offset)
+{
+ if (reg == ORC_REG_BP_INDIRECT)
+ printf("(bp%+d)", offset);
+ else if (reg == ORC_REG_SP_INDIRECT)
+ printf("(sp%+d)", offset);
+ else if (reg == ORC_REG_UNDEFINED)
+ printf("(und)");
+ else
+ printf("%s%+d", reg_name(reg), offset);
+}
+
+int orc_dump(const char *_objname)
+{
+ int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
+ struct orc_entry *orc = NULL;
+ char *name;
+ size_t nr_sections;
+ Elf64_Addr orc_ip_addr = 0;
+ size_t shstrtab_idx;
+ Elf *elf;
+ Elf_Scn *scn;
+ GElf_Shdr sh;
+ GElf_Rela rela;
+ GElf_Sym sym;
+ Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
+
+
+ objname = _objname;
+
+ elf_version(EV_CURRENT);
+
+ fd = open(objname, O_RDONLY);
+ if (fd == -1) {
+ perror("open");
+ return -1;
+ }
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ WARN_ELF("elf_begin");
+ return -1;
+ }
+
+ if (elf_getshdrnum(elf, &nr_sections)) {
+ WARN_ELF("elf_getshdrnum");
+ return -1;
+ }
+
+ if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
+ WARN_ELF("elf_getshdrstrndx");
+ return -1;
+ }
+
+ for (i = 0; i < nr_sections; i++) {
+ scn = elf_getscn(elf, i);
+ if (!scn) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (!gelf_getshdr(scn, &sh)) {
+ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
+
+ data = elf_getdata(scn, NULL);
+ if (!data) {
+ WARN_ELF("elf_getdata");
+ return -1;
+ }
+
+ if (!strcmp(name, ".symtab")) {
+ symtab = data;
+ } else if (!strcmp(name, ".orc_unwind")) {
+ orc = data->d_buf;
+ orc_size = sh.sh_size;
+ } else if (!strcmp(name, ".orc_unwind_ip")) {
+ orc_ip = data->d_buf;
+ orc_ip_addr = sh.sh_addr;
+ } else if (!strcmp(name, ".rela.orc_unwind_ip")) {
+ rela_orc_ip = data;
+ }
+ }
+
+ if (!symtab || !orc || !orc_ip)
+ return 0;
+
+ if (orc_size % sizeof(*orc) != 0) {
+ WARN("bad .orc_unwind section size");
+ return -1;
+ }
+
+ nr_entries = orc_size / sizeof(*orc);
+ for (i = 0; i < nr_entries; i++) {
+ if (rela_orc_ip) {
+ if (!gelf_getrela(rela_orc_ip, i, &rela)) {
+ WARN_ELF("gelf_getrela");
+ return -1;
+ }
+
+ if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
+ WARN_ELF("gelf_getsym");
+ return -1;
+ }
+
+ scn = elf_getscn(elf, sym.st_shndx);
+ if (!scn) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (!gelf_getshdr(scn, &sh)) {
+ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+ if (!name || !*name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
+
+ printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
+
+ } else {
+ printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
+ }
+
+
+ printf(" sp:");
+
+ print_reg(orc[i].sp_reg, orc[i].sp_offset);
+
+ printf(" bp:");
+
+ print_reg(orc[i].bp_reg, orc[i].bp_offset);
+
+ printf(" type:%s\n", orc_type_name(orc[i].type));
+ }
+
+ elf_end(elf);
+ close(fd);
+
+ return 0;
+}
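
For reference, the printf() calls above emit one line per ORC entry; the session below is purely illustrative (it assumes the "orc dump" subcommand that this series wires up through cmd_orc, plus a made-up object file), not output captured from this patch:

  $ ./objtool orc dump foo.o
  .text+8: sp:sp+16 bp:(und) type:call
  .text+28: sp:bp+16 bp:prevsp-16 type:call
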
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
new file mode 100644
index 000000000000..18384d9be4e1
--- /dev/null
+++ b/tools/objtool/orc_gen.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "orc.h"
+#include "check.h"
+#include "warn.h"
+
+int create_orc(struct objtool_file *file)
+{
+ struct instruction *insn;
+
+ for_each_insn(file, insn) {
+ struct orc_entry *orc = &insn->orc;
+ struct cfi_reg *cfa = &insn->state.cfa;
+ struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+
+ if (cfa->base == CFI_UNDEFINED) {
+ orc->sp_reg = ORC_REG_UNDEFINED;
+ continue;
+ }
+
+ switch (cfa->base) {
+ case CFI_SP:
+ orc->sp_reg = ORC_REG_SP;
+ break;
+ case CFI_SP_INDIRECT:
+ orc->sp_reg = ORC_REG_SP_INDIRECT;
+ break;
+ case CFI_BP:
+ orc->sp_reg = ORC_REG_BP;
+ break;
+ case CFI_BP_INDIRECT:
+ orc->sp_reg = ORC_REG_BP_INDIRECT;
+ break;
+ case CFI_R10:
+ orc->sp_reg = ORC_REG_R10;
+ break;
+ case CFI_R13:
+ orc->sp_reg = ORC_REG_R13;
+ break;
+ case CFI_DI:
+ orc->sp_reg = ORC_REG_DI;
+ break;
+ case CFI_DX:
+ orc->sp_reg = ORC_REG_DX;
+ break;
+ default:
+ WARN_FUNC("unknown CFA base reg %d",
+ insn->sec, insn->offset, cfa->base);
+ return -1;
+ }
+
+ switch(bp->base) {
+ case CFI_UNDEFINED:
+ orc->bp_reg = ORC_REG_UNDEFINED;
+ break;
+ case CFI_CFA:
+ orc->bp_reg = ORC_REG_PREV_SP;
+ break;
+ case CFI_BP:
+ orc->bp_reg = ORC_REG_BP;
+ break;
+ default:
+ WARN_FUNC("unknown BP base reg %d",
+ insn->sec, insn->offset, bp->base);
+ return -1;
+ }
+
+ orc->sp_offset = cfa->offset;
+ orc->bp_offset = bp->offset;
+ orc->type = insn->state.type;
+ }
+
+ return 0;
+}
+
+static int create_orc_entry(struct section *u_sec, struct section *ip_relasec,
+ unsigned int idx, struct section *insn_sec,
+ unsigned long insn_off, struct orc_entry *o)
+{
+ struct orc_entry *orc;
+ struct rela *rela;
+
+ if (!insn_sec->sym) {
+ WARN("missing symbol for section %s", insn_sec->name);
+ return -1;
+ }
+
+ /* populate ORC data */
+ orc = (struct orc_entry *)u_sec->data->d_buf + idx;
+ memcpy(orc, o, sizeof(*orc));
+
+ /* populate rela for ip */
+ rela = malloc(sizeof(*rela));
+ if (!rela) {
+ perror("malloc");
+ return -1;
+ }
+ memset(rela, 0, sizeof(*rela));
+
+ rela->sym = insn_sec->sym;
+ rela->addend = insn_off;
+ rela->type = R_X86_64_PC32;
+ rela->offset = idx * sizeof(int);
+
+ list_add_tail(&rela->list, &ip_relasec->rela_list);
+ hash_add(ip_relasec->rela_hash, &rela->hash, rela->offset);
+
+ return 0;
+}
+
+int create_orc_sections(struct objtool_file *file)
+{
+ struct instruction *insn, *prev_insn;
+ struct section *sec, *u_sec, *ip_relasec;
+ unsigned int idx;
+
+ struct orc_entry empty = {
+ .sp_reg = ORC_REG_UNDEFINED,
+ .bp_reg = ORC_REG_UNDEFINED,
+ .type = ORC_TYPE_CALL,
+ };
+
+ sec = find_section_by_name(file->elf, ".orc_unwind");
+ if (sec) {
+ WARN("file already has .orc_unwind section, skipping");
+ return -1;
+ }
+
+ /* count the number of needed orcs */
+ idx = 0;
+ for_each_sec(file, sec) {
+ if (!sec->text)
+ continue;
+
+ prev_insn = NULL;
+ sec_for_each_insn(file, sec, insn) {
+ if (!prev_insn ||
+ memcmp(&insn->orc, &prev_insn->orc,
+ sizeof(struct orc_entry))) {
+ idx++;
+ }
+ prev_insn = insn;
+ }
+
+ /* section terminator */
+ if (prev_insn)
+ idx++;
+ }
+ if (!idx)
+ return -1;
+
+
+ /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
+ sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+ if (!sec)
+ return -1;
+
+ ip_relasec = elf_create_rela_section(file->elf, sec);
+ if (!ip_relasec)
+ return -1;
+
+ /* create .orc_unwind section */
+ u_sec = elf_create_section(file->elf, ".orc_unwind",
+ sizeof(struct orc_entry), idx);
+
+ /* populate sections */
+ idx = 0;
+ for_each_sec(file, sec) {
+ if (!sec->text)
+ continue;
+
+ prev_insn = NULL;
+ sec_for_each_insn(file, sec, insn) {
+ if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
+ sizeof(struct orc_entry))) {
+
+ if (create_orc_entry(u_sec, ip_relasec, idx,
+ insn->sec, insn->offset,
+ &insn->orc))
+ return -1;
+
+ idx++;
+ }
+ prev_insn = insn;
+ }
+
+ /* section terminator */
+ if (prev_insn) {
+ if (create_orc_entry(u_sec, ip_relasec, idx,
+ prev_insn->sec,
+ prev_insn->offset + prev_insn->len,
+ &empty))
+ return -1;
+
+ idx++;
+ }
+ }
+
+ if (elf_rebuild_rela_section(ip_relasec))
+ return -1;
+
+ return 0;
+}
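
A small worked example of the two passes above, with hypothetical offsets: for a .text section holding instructions at 0x0, 0x4 and 0x8, where 0x0 and 0x4 compute identical ORC state, the counting pass records a new entry at 0x0, skips 0x4, records a new entry at 0x8, and adds one terminator (the "empty" entry, both registers undefined) just past the last instruction, so idx ends up at 3. The second pass then fills three ints in .orc_unwind_ip (each with an R_X86_64_PC32 rela back into .text) and three orc_entry slots in .orc_unwind.
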
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bff8abb3a4aa..84f001d52322 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -91,16 +91,16 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
alt->jump_or_nop = entry->jump_or_nop;

if (alt->group) {
- alt->orig_len = *(unsigned char *)(sec->data + offset +
+ alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
entry->orig_len);
- alt->new_len = *(unsigned char *)(sec->data + offset +
+ alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
entry->new_len);
}

if (entry->feature) {
unsigned short feature;

- feature = *(unsigned short *)(sec->data + offset +
+ feature = *(unsigned short *)(sec->data->d_buf + offset +
entry->feature);

/*
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755
index 000000000000..1470e74e9d66
--- /dev/null
+++ b/tools/objtool/sync-check.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+ local file=$1
+
+ diff $file ../../$file > /dev/null ||
+ echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+ exit 0
+fi
+
+for i in $FILES; do
+ check $i
+done
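
Expanded, check() above boils down to one diff per synced file; for the newly added orc_types.h it runs roughly the following (assuming the script is executed from tools/objtool/, which the directory tests enforce):

  diff arch/x86/include/asm/orc_types.h ../../arch/x86/include/asm/orc_types.h > /dev/null ||
      echo "Warning: synced file at 'tools/objtool/arch/x86/include/asm/orc_types.h' differs from latest kernel version at 'arch/x86/include/asm/orc_types.h'"
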
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index ac7e07523e84..afd9f7a05f6d 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -18,6 +18,13 @@
#ifndef _WARN_H
#define _WARN_H

+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "elf.h"
+
extern const char *objname;

static inline char *offstr(struct section *sec, unsigned long offset)
@@ -57,4 +64,7 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \
})

+#define WARN_ELF(format, ...) \
+ WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+
#endif /* _WARN_H */
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 0bda2cca2b3a..a4f98e131819 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -51,6 +51,7 @@ tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
tools/include/asm-generic/bitops/const_hweight.h
tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__ffz.h
tools/include/asm-generic/bitops/__fls.h
tools/include/asm-generic/bitops/find.h
tools/include/asm-generic/bitops/fls64.h
@@ -60,7 +61,9 @@ tools/include/asm-generic/bitops.h
tools/include/linux/atomic.h
tools/include/linux/bitops.h
tools/include/linux/compiler.h
+tools/include/linux/compiler-gcc.h
tools/include/linux/coresight-pmu.h
+tools/include/linux/bug.h
tools/include/linux/filter.h
tools/include/linux/hash.h
tools/include/linux/kernel.h
@@ -70,12 +73,15 @@ tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/bpf_common.h
+tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/hw_breakpoint.h
tools/include/uapi/linux/mman.h
tools/include/uapi/linux/perf_event.h
+tools/include/uapi/linux/stat.h
tools/include/linux/poison.h
tools/include/linux/rbtree.h
tools/include/linux/rbtree_augmented.h
+tools/include/linux/refcount.h
tools/include/linux/string.h
tools/include/linux/stringify.h
tools/include/linux/types.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 2b92ffef554b..ad3726c5754d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -177,6 +177,36 @@ ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
endif
endif

+# The fixdep build - we force fixdep tool to be built as
+# the first target in the separate make session not to be
+# disturbed by any parallel make jobs. Once fixdep is done
+# we issue the requested build with FIXDEP=1 variable.
+#
+# The fixdep build is disabled for $(NON_CONFIG_TARGETS)
+# targets, because it's not necessary.
+
+ifdef FIXDEP
+ force_fixdep := 0
+else
+ force_fixdep := $(config)
+endif
+
+export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
+export HOSTCC HOSTLD HOSTAR
+
+include $(srctree)/tools/build/Makefile.include
+
+ifeq ($(force_fixdep),1)
+goals := $(filter-out all sub-make, $(MAKECMDGOALS))
+
+$(goals) all: sub-make
+
+sub-make: fixdep
+ @./check-headers.sh
+ $(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
+
+else # force_fixdep
+
# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
# Without this setting the output feature dump file misses some features, for
# example, liberty. Select all checkers so we won't get an incomplete feature
@@ -348,10 +378,6 @@ strip: $(PROGRAMS) $(OUTPUT)perf

PERF_IN := $(OUTPUT)perf-in.o

-export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR
-include $(srctree)/tools/build/Makefile.include
-
JEVENTS := $(OUTPUT)pmu-events/jevents
JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o

@@ -362,99 +388,6 @@ export JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj

$(PERF_IN): prepare FORCE
- @(test -f ../../include/uapi/linux/perf_event.h && ( \
- (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
- || echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
- @(test -f ../../include/linux/hash.h && ( \
- (diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
- || echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
- @(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
- (diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
- || echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
- (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/asm/required-features.h && ( \
- (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
- (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/lib/memcpy_64.S && ( \
- (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
- || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/lib/memset_64.S && ( \
- (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
- || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
- @(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
- (diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
- || echo "Warning: tools/arch/arm/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/arm64/include/uapi/asm/perf_regs.h && ( \
- (diff -B ../arch/arm64/include/uapi/asm/perf_regs.h ../../arch/arm64/include/uapi/asm/perf_regs.h >/dev/null) \
- || echo "Warning: tools/arch/arm64/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/powerpc/include/uapi/asm/perf_regs.h && ( \
- (diff -B ../arch/powerpc/include/uapi/asm/perf_regs.h ../../arch/powerpc/include/uapi/asm/perf_regs.h >/dev/null) \
- || echo "Warning: tools/arch/powerpc/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/uapi/asm/perf_regs.h && ( \
- (diff -B ../arch/x86/include/uapi/asm/perf_regs.h ../../arch/x86/include/uapi/asm/perf_regs.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/uapi/asm/kvm.h && ( \
- (diff -B ../arch/x86/include/uapi/asm/kvm.h ../../arch/x86/include/uapi/asm/kvm.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/uapi/asm/kvm_perf.h && ( \
- (diff -B ../arch/x86/include/uapi/asm/kvm_perf.h ../../arch/x86/include/uapi/asm/kvm_perf.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/uapi/asm/svm.h && ( \
- (diff -B ../arch/x86/include/uapi/asm/svm.h ../../arch/x86/include/uapi/asm/svm.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/uapi/asm/svm.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/x86/include/uapi/asm/vmx.h && ( \
- (diff -B ../arch/x86/include/uapi/asm/vmx.h ../../arch/x86/include/uapi/asm/vmx.h >/dev/null) \
- || echo "Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/powerpc/include/uapi/asm/kvm.h && ( \
- (diff -B ../arch/powerpc/include/uapi/asm/kvm.h ../../arch/powerpc/include/uapi/asm/kvm.h >/dev/null) \
- || echo "Warning: tools/arch/powerpc/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/s390/include/uapi/asm/kvm.h && ( \
- (diff -B ../arch/s390/include/uapi/asm/kvm.h ../../arch/s390/include/uapi/asm/kvm.h >/dev/null) \
- || echo "Warning: tools/arch/s390/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/s390/include/uapi/asm/kvm_perf.h && ( \
- (diff -B ../arch/s390/include/uapi/asm/kvm_perf.h ../../arch/s390/include/uapi/asm/kvm_perf.h >/dev/null) \
- || echo "Warning: tools/arch/s390/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/s390/include/uapi/asm/sie.h && ( \
- (diff -B ../arch/s390/include/uapi/asm/sie.h ../../arch/s390/include/uapi/asm/sie.h >/dev/null) \
- || echo "Warning: tools/arch/s390/include/uapi/asm/sie.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/arm/include/uapi/asm/kvm.h && ( \
- (diff -B ../arch/arm/include/uapi/asm/kvm.h ../../arch/arm/include/uapi/asm/kvm.h >/dev/null) \
- || echo "Warning: tools/arch/arm/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
- @(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
- (diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
- || echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
- @(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
- (diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
- || echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
- @(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
- (diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
- || echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
- @(test -f ../../include/asm-generic/bitops/__fls.h && ( \
- (diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
- || echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
- @(test -f ../../include/asm-generic/bitops/fls.h && ( \
- (diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
- || echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
- @(test -f ../../include/asm-generic/bitops/fls64.h && ( \
- (diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
- || echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
- @(test -f ../../include/linux/coresight-pmu.h && ( \
- (diff -B ../include/linux/coresight-pmu.h ../../include/linux/coresight-pmu.h >/dev/null) \
- || echo "Warning: tools/include/linux/coresight-pmu.h differs from kernel" >&2 )) || true
- @(test -f ../../include/uapi/asm-generic/mman-common.h && ( \
- (diff -B ../include/uapi/asm-generic/mman-common.h ../../include/uapi/asm-generic/mman-common.h >/dev/null) \
- || echo "Warning: tools/include/uapi/asm-generic/mman-common.h differs from kernel" >&2 )) || true
- @(test -f ../../include/uapi/asm-generic/mman.h && ( \
- (diff -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>$$" ../include/uapi/asm-generic/mman.h ../../include/uapi/asm-generic/mman.h >/dev/null) \
- || echo "Warning: tools/include/uapi/asm-generic/mman.h differs from kernel" >&2 )) || true
- @(test -f ../../include/uapi/linux/mman.h && ( \
- (diff -B -I "^#include <\(uapi/\)*asm/mman.h>$$" ../include/uapi/linux/mman.h ../../include/uapi/linux/mman.h >/dev/null) \
- || echo "Warning: tools/include/uapi/linux/mman.h differs from kernel" >&2 )) || true
$(Q)$(MAKE) $(build)=perf

$(JEVENTS_IN): FORCE
@@ -470,7 +403,7 @@ $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_L
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@

-$(GTK_IN): fixdep FORCE
+$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk

$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -515,7 +448,7 @@ endif
__build-dir = $(subst $(OUTPUT),,$(dir $@))
build-dir = $(if $(__build-dir),$(__build-dir),.)

-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders

$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -555,7 +488,7 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)

LIBPERF_IN := $(OUTPUT)libperf-in.o

-$(LIBPERF_IN): prepare fixdep FORCE
+$(LIBPERF_IN): prepare FORCE
$(Q)$(MAKE) $(build)=libperf

$(LIB_FILE): $(LIBPERF_IN)
@@ -563,10 +496,10 @@ $(LIB_FILE): $(LIBPERF_IN)

LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)

-$(LIBTRACEEVENT): fixdep FORCE
+$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a

-libtraceevent_plugins: fixdep FORCE
+libtraceevent_plugins: FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins

$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -579,21 +512,21 @@ $(LIBTRACEEVENT)-clean:
install-traceevent-plugins: libtraceevent_plugins
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins

-$(LIBAPI): fixdep FORCE
+$(LIBAPI): FORCE
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a

$(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null

-$(LIBBPF): fixdep FORCE
+$(LIBBPF): FORCE
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)

$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null

-$(LIBSUBCMD): fixdep FORCE
+$(LIBSUBCMD): FORCE
$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a

$(LIBSUBCMD)-clean:
@@ -790,3 +723,4 @@ FORCE:
.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
.PHONY: libtraceevent_plugins archheaders

+endif # force_fixdep
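
The net effect of the fixdep rework above is a two-pass build; sketched here for a plain "make" in tools/perf (goal names illustrative, not taken from this patch):

  # pass 1: FIXDEP is unset, so force_fixdep=$(config), which is 1 for any
  #         goal not in NON_CONFIG_TARGETS; "all" maps onto sub-make, which
  #         builds fixdep, runs ./check-headers.sh and then re-invokes:
  #             $(MAKE) FIXDEP=1 -f Makefile.perf all
  # pass 2: FIXDEP=1 forces force_fixdep=0, so the else branch runs the
  #         normal perf rules, now without per-target 'fixdep' prerequisites.
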
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 555263e385c9..e93ef0b38db8 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
326 common copy_file_range sys_copy_file_range
327 64 preadv2 sys_preadv2
328 64 pwritev2 sys_pwritev2
+329 common pkey_mprotect sys_pkey_mprotect
+330 common pkey_alloc sys_pkey_alloc
+331 common pkey_free sys_pkey_free

#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -374,5 +377,5 @@
543 x32 io_setup compat_sys_io_setup
544 x32 io_submit compat_sys_io_submit
545 x32 execveat compat_sys_execveat/ptregs
-534 x32 preadv2 compat_sys_preadv2
-535 x32 pwritev2 compat_sys_pwritev2
+546 x32 preadv2 compat_sys_preadv64v2
+547 x32 pwritev2 compat_sys_pwritev64v2
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
new file mode 100755
index 000000000000..83fe2202382e
--- /dev/null
+++ b/tools/perf/check-headers.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+HEADERS='
+include/uapi/linux/fcntl.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/stat.h
+include/linux/hash.h
+include/uapi/linux/hw_breakpoint.h
+arch/x86/include/asm/disabled-features.h
+arch/x86/include/asm/required-features.h
+arch/x86/include/asm/cpufeatures.h
+arch/arm/include/uapi/asm/perf_regs.h
+arch/arm64/include/uapi/asm/perf_regs.h
+arch/powerpc/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/powerpc/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/arm/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/kvm.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
+include/asm-generic/bitops/fls64.h
+include/linux/coresight-pmu.h
+include/uapi/asm-generic/mman-common.h
+'
+
+check () {
+ file=$1
+ opts=
+
+ shift
+ while [ -n "$*" ]; do
+ opts="$opts \"$1\""
+ shift
+ done
+
+ cmd="diff $opts ../$file ../../$file > /dev/null"
+
+ test -f ../../$file &&
+ eval $cmd || echo "Warning: $file differs from kernel" >&2
+}
+
+
+# simple diff check
+for i in $HEADERS; do
+ check $i -B
+done
+
+# diff with extra ignore lines
+check arch/x86/lib/memcpy_64.S -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check arch/x86/lib/memset_64.S -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check include/uapi/asm-generic/mman.h -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>"
+check include/uapi/linux/mman.h -B -I "^#include <\(uapi/\)*asm/mman.h>"
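
For one of the entries with extra ignore options, check() above expands roughly as follows (warning text taken from the script, the worked expansion is only a sketch):

  check arch/x86/lib/memcpy_64.S -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
  # eval's:  diff -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" \
  #               ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S > /dev/null
  # and on a mismatch writes to stderr:
  #   Warning: arch/x86/lib/memcpy_64.S differs from kernel
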
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 43899e0d6fa1..e72d370889f8 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -23,8 +23,6 @@
#endif
#endif

-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
#ifdef __GNUC__
#define TYPEOF(x) (__typeof__(x))
#else