Some CPUs have dedicated VMCS controls for switching the IA32_PERF_GLOBAL_CTRL MSR.
Add logic to detect whether such support exists and works properly, and extend the
MSR-switching code to use it when available. Also extend the number of generic
MSR-switching entries to 8.
/*
 * Allocation bitmap for VPIDs (one bit per possible VPID value);
 * presumably guarded by vmx_vpid_lock below — NOTE(review): confirm all
 * accesses take the lock.
 */
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1195,10 +1196,29 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
unsigned i;
struct msr_autoload *m =&vmx->msr_autoload;
+ u32 entry_load, exit_load;
+ bool done = false;
- if (msr == MSR_EFER&& cpu_has_load_ia32_efer) {
- vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
- vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+ switch (msr) {
+ case MSR_EFER:
+ if (cpu_has_load_ia32_efer) {
+ entry_load = VM_ENTRY_LOAD_IA32_EFER;
+ exit_load = VM_EXIT_LOAD_IA32_EFER;
+ done = true;
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (cpu_has_load_perf_global_ctrl) {
+ entry_load = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ exit_load = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ done = true;
+ }
+ break;
+ }
+
+ if (done) {
+ vmcs_clear_bits(VM_ENTRY_CONTROLS, entry_load);
+ vmcs_clear_bits(VM_EXIT_CONTROLS, exit_load);
return;
}