Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

From: Xiao Guangrong
Date: Mon Jun 08 2015 - 22:50:11 EST




On 06/09/2015 08:36 AM, David Matlack wrote:
On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong
<guangrong.xiao@xxxxxxxxxxxxxxx> wrote:
It walks all MTRRs and gets the memory cache type settings for the
specified range; it also checks whether the range is fully covered by MTRRs

Signed-off-by: Xiao Guangrong <guangrong.xiao@xxxxxxxxxxxxxxx>
---
arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

+struct mtrr_looker {
+ /* input fields. */
+ struct kvm_mtrr *mtrr_state;
+ u64 start;
+ u64 end;
+
+ /* output fields. */
+ int mem_type;
+ /* [start, end) is fully covered in MTRRs? */

s/fully/not fully/ ?

Yup, thanks for pointing it out.


+ bool partial_map;
+
+ /* private fields. */
+ union {
+ /* used for fixed MTRRs. */
+ struct {
+ int index;
+ int seg;
+ };
+
+ /* used for var MTRRs. */
+ struct {
+ struct kvm_mtrr_range *range;
+ /* max address has been covered in var MTRRs. */
+ u64 start_max;
+ };
+ };
+
+ bool fixed;
+};
+
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+ struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+ looker->mtrr_state = mtrr_state;
+ looker->start = start;
+ looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+ struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+ return mtrr_seg->start + mtrr_seg->range_size * index;

Should be (index + 1)?

Good eyes, will fix.


+}
+
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+ int seg, index;
+
+ if (!looker->mtrr_state->fixed_mtrr_enabled)
+ return false;
+
+ seg = fixed_mtrr_addr_to_seg(looker->start);
+ if (seg < 0)
+ return false;
+
+ looker->fixed = true;
+ index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+ looker->index = index;
+ looker->seg = seg;
+ looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+ looker->start = fixed_mtrr_range_end_addr(seg, index);
+ return true;
+}
+
+static bool match_var_range(struct mtrr_looker *looker,
+ struct kvm_mtrr_range *range)
+{
+ u64 start, end;
+
+ var_mtrr_range(range, &start, &end);
+ if (!(start >= looker->end || end <= looker->start)) {
+ looker->range = range;
+ looker->mem_type = range->base & 0xff;
+
+ /*
+ * the function is called when we do kvm_mtrr.head walking
+ * that means range has the minimum base address interleaves
+ * with [looker->start_max, looker->end).
+ */

I'm having trouble understanding this comment. I think this is what you
are trying to say:

this function is called when we do kvm_mtrr.head walking. range has the
minimum base address which interleaves [looker->start_max, looker->end).

Let me know if I parsed it wrong.

Yes, it is, will improve the comment.

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/