[PATCH v2 06/14] KVM: x86: pass struct kvm_pit instead of kvm in PIT

From: Radim Krčmář
Date: Wed Feb 17 2016 - 14:18:55 EST


Pass struct kvm_pit into the internal PIT functions. Those functions used
to reach the PIT through kvm->arch.vpit, even though most of them never
used *kvm for anything else. Another benefit is that kvm->arch.vpit no
longer needs to be set during initialization.

Signed-off-by: Radim Krčmář <rkrcmar@xxxxxxxxxx>
---
v2: new
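
For quick orientation, a minimal before/after sketch of the conversion, taken
from the pit_get_gate() hunk below (illustration only, not an extra change):

/* before: the helper re-derives the PIT from struct kvm */
static int pit_get_gate(struct kvm *kvm, int channel)
{
        return kvm->arch.vpit->pit_state.channels[channel].gate;
}

/* after: callers pass the PIT they already hold, so the helper no longer
 * depends on kvm->arch.vpit having been set */
static int pit_get_gate(struct kvm_pit *pit, int channel)
{
        return pit->pit_state.channels[channel].gate;
}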

arch/x86/kvm/i8254.c | 112 ++++++++++++++++++++++++---------------------------
arch/x86/kvm/i8254.h |   4 +-
arch/x86/kvm/x86.c   |  26 +++++++-----
3 files changed, 70 insertions(+), 72 deletions(-)

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 328b21f3ab7c..c11734dd5092 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -71,10 +71,9 @@ static u64 muldiv64(u64 a, u32 b, u32 c)
return res.ll;
}

-static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
+static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

switch (c->mode) {
default:
@@ -95,16 +94,16 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
c->gate = val;
}

-static int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm_pit *pit, int channel)
{
- return kvm->arch.vpit->pit_state.channels[channel].gate;
+ return pit->pit_state.channels[channel].gate;
}

-static s64 __kpit_elapsed(struct kvm *kvm)
+static s64 __kpit_elapsed(struct kvm_pit *pit)
{
s64 elapsed;
ktime_t remaining;
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ struct kvm_kpit_state *ps = &pit->pit_state;

if (!ps->period)
return 0;
@@ -124,23 +123,22 @@ static s64 __kpit_elapsed(struct kvm *kvm)
return elapsed;
}

-static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
+static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
int channel)
{
if (channel == 0)
- return __kpit_elapsed(kvm);
+ return __kpit_elapsed(pit);

return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

-static int pit_get_count(struct kvm *kvm, int channel)
+static int pit_get_count(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
s64 d, t;
int counter;

- t = kpit_elapsed(kvm, c, channel);
+ t = kpit_elapsed(pit, c, channel);
d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

switch (c->mode) {
@@ -161,14 +159,13 @@ static int pit_get_count(struct kvm *kvm, int channel)
return counter;
}

-static int pit_get_out(struct kvm *kvm, int channel)
+static int pit_get_out(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
s64 d, t;
int out;

- t = kpit_elapsed(kvm, c, channel);
+ t = kpit_elapsed(pit, c, channel);
d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

switch (c->mode) {
@@ -194,25 +191,23 @@ static int pit_get_out(struct kvm *kvm, int channel)
return out;
}

-static void pit_latch_count(struct kvm *kvm, int channel)
+static void pit_latch_count(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

if (!c->count_latched) {
- c->latched_count = pit_get_count(kvm, channel);
+ c->latched_count = pit_get_count(pit, channel);
c->count_latched = c->rw_mode;
}
}

-static void pit_latch_status(struct kvm *kvm, int channel)
+static void pit_latch_status(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

if (!c->status_latched) {
/* TODO: Return NULL COUNT (bit 6). */
- c->status = ((pit_get_out(kvm, channel) << 7) |
+ c->status = ((pit_get_out(pit, channel) << 7) |
(c->rw_mode << 4) |
(c->mode << 1) |
c->bcd);
@@ -306,9 +301,10 @@ static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
atomic_set(&pit->pit_state.irq_ack, 1);
}

-static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ struct kvm_kpit_state *ps = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
s64 interval;

if (!ioapic_in_kernel(kvm) ||
@@ -326,9 +322,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
ps->is_periodic = is_period;

ps->timer.function = pit_timer_fn;
- ps->kvm = ps->pit->kvm;
+ ps->kvm = pit->kvm;

- kvm_pit_reset_reinject(ps->pit);
+ kvm_pit_reset_reinject(pit);

/*
* Do not allow the guest to program periodic timers with small
@@ -351,9 +347,9 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
HRTIMER_MODE_ABS);
}

-static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ struct kvm_kpit_state *ps = &pit->pit_state;

pr_debug("load_count val is %d, channel is %d\n", val, channel);

@@ -378,32 +374,33 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
case 1:
/* FIXME: enhance mode 4 precision */
case 4:
- create_pit_timer(kvm, val, 0);
+ create_pit_timer(pit, val, 0);
break;
case 2:
case 3:
- create_pit_timer(kvm, val, 1);
+ create_pit_timer(pit, val, 1);
break;
default:
- destroy_pit_timer(kvm->arch.vpit);
+ destroy_pit_timer(pit);
}
}

-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+ int hpet_legacy_start)
{
u8 saved_mode;

- WARN_ON_ONCE(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+ WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

if (hpet_legacy_start) {
/* save existing mode for later reenablement */
WARN_ON(channel != 0);
- saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
- kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
- pit_load_count(kvm, channel, val);
- kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
+ saved_mode = pit->pit_state.channels[0].mode;
+ pit->pit_state.channels[0].mode = 0xff; /* disable timer */
+ pit_load_count(pit, channel, val);
+ pit->pit_state.channels[0].mode = saved_mode;
} else {
- pit_load_count(kvm, channel, val);
+ pit_load_count(pit, channel, val);
}
}

@@ -429,7 +426,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
{
struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
int channel, access;
struct kvm_kpit_channel_state *s;
u32 val = *(u32 *) data;
@@ -453,9 +449,9 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
s = &pit_state->channels[channel];
if (val & (2 << channel)) {
if (!(val & 0x20))
- pit_latch_count(kvm, channel);
+ pit_latch_count(pit, channel);
if (!(val & 0x10))
- pit_latch_status(kvm, channel);
+ pit_latch_status(pit, channel);
}
}
} else {
@@ -463,7 +459,7 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
s = &pit_state->channels[channel];
access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
if (access == 0) {
- pit_latch_count(kvm, channel);
+ pit_latch_count(pit, channel);
} else {
s->rw_mode = access;
s->read_state = access;
@@ -480,17 +476,17 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
switch (s->write_state) {
default:
case RW_STATE_LSB:
- pit_load_count(kvm, addr, val);
+ pit_load_count(pit, addr, val);
break;
case RW_STATE_MSB:
- pit_load_count(kvm, addr, val << 8);
+ pit_load_count(pit, addr, val << 8);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- pit_load_count(kvm, addr, s->write_latch | (val << 8));
+ pit_load_count(pit, addr, s->write_latch | (val << 8));
s->write_state = RW_STATE_WORD0;
break;
}
@@ -506,7 +502,6 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu,
{
struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
int ret, count;
struct kvm_kpit_channel_state *s;
if (!pit_in_range(addr))
@@ -543,20 +538,20 @@ static int pit_ioport_read(struct kvm_vcpu *vcpu,
switch (s->read_state) {
default:
case RW_STATE_LSB:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = count & 0xff;
break;
case RW_STATE_MSB:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = (count >> 8) & 0xff;
break;
case RW_STATE_WORD0:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = count & 0xff;
s->read_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = (count >> 8) & 0xff;
s->read_state = RW_STATE_WORD0;
break;
@@ -577,14 +572,13 @@ static int speaker_ioport_write(struct kvm_vcpu *vcpu,
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
u32 val = *(u32 *) data;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
return -EOPNOTSUPP;

mutex_lock(&pit_state->lock);
pit_state->speaker_data_on = (val >> 1) & 1;
- pit_set_gate(kvm, 2, val & 1);
+ pit_set_gate(pit, 2, val & 1);
mutex_unlock(&pit_state->lock);
return 0;
}
@@ -595,7 +589,6 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu,
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
unsigned int refresh_clock;
int ret;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
@@ -605,8 +598,8 @@ static int speaker_ioport_read(struct kvm_vcpu *vcpu,
refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

mutex_lock(&pit_state->lock);
- ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
- (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+ ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
+ (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
if (len > sizeof(ret))
len = sizeof(ret);
memcpy(data, (char *)&ret, len);
@@ -625,7 +618,7 @@ void kvm_pit_reset(struct kvm_pit *pit)
c = &pit->pit_state.channels[i];
c->mode = 0xff;
c->gate = (i != 2);
- pit_load_count(pit->kvm, i, 0);
+ pit_load_count(pit, i, 0);
}
mutex_unlock(&pit->pit_state.lock);

@@ -687,7 +680,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
}
init_kthread_work(&pit->expired, pit_do_work);

- kvm->arch.vpit = pit;
pit->kvm = kvm;

pit_state = &pit->pit_state;
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index f8cf4b84f435..a6aceaf08df5 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -56,9 +56,11 @@ struct kvm_pit {
#define KVM_MAX_PIT_INTR_INTERVAL HZ / 100
#define KVM_PIT_CHANNEL_MASK 0x3

-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start);
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
+
void kvm_pit_reset(struct kvm_pit *pit);
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+ int hpet_legacy_start);

#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf15bc5b0dff..57b296466bf0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3637,11 +3637,13 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int i;
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ memcpy(&pit->pit_state, ps, sizeof(struct kvm_pit_state));
for (i = 0; i < 3; i++)
- kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
+ mutex_unlock(&pit->pit_state.lock);
return 0;
}

@@ -3661,18 +3663,20 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
int start = 0;
int i;
u32 prev_legacy, cur_legacy;
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
if (!prev_legacy && cur_legacy)
start = 1;
- memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
- sizeof(kvm->arch.vpit->pit_state.channels));
- kvm->arch.vpit->pit_state.flags = ps->flags;
+ memcpy(&pit->pit_state.channels, &ps->channels,
+ sizeof(pit->pit_state.channels));
+ pit->pit_state.flags = ps->flags;
for (i = 0; i < 3; i++)
- kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count,
+ kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
start && i == 0);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ mutex_unlock(&pit->pit_state.lock);
return 0;
}

--
2.7.1