[tip:x86/cache] x86/resctrl: Introduce AMD QOS feature

From: tip-bot for Babu Moger
Date: Thu Nov 22 2018 - 16:52:42 EST


Commit-ID: 4d05bf71f157d756932e77cdee16dc99e235d636
Gitweb: https://git.kernel.org/tip/4d05bf71f157d756932e77cdee16dc99e235d636
Author: Babu Moger <Babu.Moger@xxxxxxx>
AuthorDate: Wed, 21 Nov 2018 20:28:45 +0000
Committer: Borislav Petkov <bp@xxxxxxx>
CommitDate: Thu, 22 Nov 2018 20:16:20 +0100

x86/resctrl: Introduce AMD QOS feature

Enable the QoS feature on AMD.

The following QoS sub-features are supported on AMD if the underlying
hardware supports them (a userspace usage sketch follows this list):

- L3 Cache allocation enforcement
- L3 Cache occupancy monitoring
- L3 Code-Data Prioritization support
- Memory Bandwidth Enforcement (Allocation)
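
For context, these sub-features are all driven through the resctrl
filesystem. Below is a minimal, hypothetical userspace sketch, not part
of this patch, which assumes resctrl is already mounted at
/sys/fs/resctrl and that L3 domain 0 exists (paths and domain IDs vary
with system topology):

/* Hypothetical illustration: exercise L3 allocation and occupancy
 * monitoring via the resctrl filesystem. Assumes resctrl is mounted. */
#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned long long bytes;

	/* L3 CAT: give the default group a non-contiguous way mask on
	 * domain 0; accepted on AMD, rejected by cbm_validate_intel(). */
	f = fopen("/sys/fs/resctrl/schemata", "w");
	if (!f)
		return 1;
	fprintf(f, "L3:0=f0f\n");
	fclose(f);

	/* L3 cache occupancy monitoring for the default group. */
	f = fopen("/sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy", "r");
	if (f && fscanf(f, "%llu", &bytes) == 1)
		printf("llc_occupancy: %llu bytes\n", bytes);
	if (f)
		fclose(f);

	return 0;
}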

The specification is available at:
https://developer.amd.com/wp-content/resources/56375.pdf

There are differences in the way some of the features are implemented
on AMD. Separate out those functions and add them as vendor-specific
functions.

The major differences are in the MBA feature; a sketch contrasting the
Intel and AMD MBA control values follows this list:

- AMD uses CPUID leaf 0x80000020 to initialize the MBA feature.
- AMD uses a direct bandwidth value instead of a delay value derived
  from the bandwidth percentage.
- The MSR base address for MBA is different.
- AMD allows non-contiguous L3 cache bit masks.
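
To make the second difference concrete, here is a standalone sketch,
not kernel code, modeling the linear-scale case: Intel maps the
bandwidth percentage to a delay value (mirroring delay_bw_map()
upstream) before the MSR write, while the new mba_wrmsr_amd() in the
diff below writes the user-supplied value unmodified:

/* Standalone sketch: contrast the value each vendor's msr_update
 * helper ends up writing for an MBA control value of 70. */
#include <stdio.h>

#define MAX_MBA_BW	100u	/* Intel: percentage scale */
#define MAX_MBA_BW_AMD	0x800	/* AMD: direct value, max 2048 */

/* Intel, linear scale: the bandwidth percentage becomes a delay,
 * where delay 0 means unthrottled. */
static unsigned int delay_bw_map(unsigned int bw_pct)
{
	return bw_pct < MAX_MBA_BW ? MAX_MBA_BW - bw_pct : 0;
}

int main(void)
{
	unsigned int ctrl = 70;

	printf("Intel writes delay %u to the THRTL MSR\n", delay_bw_map(ctrl));
	printf("AMD writes %u directly to the BW MSR\n", ctrl);
	return 0;
}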

Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Brijesh Singh <brijesh.singh@xxxxxxx>
Cc: "Chang S. Bae" <chang.seok.bae@xxxxxxxxx>
Cc: David Miller <davem@xxxxxxxxxxxxx>
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>
Cc: Dmitry Safonov <dima@xxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Joerg Roedel <jroedel@xxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Kate Stewart <kstewart@xxxxxxxxxxxxxxxxxxx>
Cc: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: <linux-doc@xxxxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab+samsung@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Philippe Ombredanne <pombredanne@xxxxxxxx>
Cc: Pu Wen <puwen@xxxxxxxx>
Cc: <qianyue.zj@xxxxxxxxxxxxxxx>
Cc: "Rafael J. Wysocki" <rafael@xxxxxxxxxx>
Cc: Reinette Chatre <reinette.chatre@xxxxxxxxx>
Cc: Rian Hunter <rian@xxxxxxxxxxxx>
Cc: Sherry Hurwitz <sherry.hurwitz@xxxxxxx>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Thomas Lendacky <Thomas.Lendacky@xxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Cc: <xiaochen.shen@xxxxxxxxx>
Link: https://lkml.kernel.org/r/20181121202811.4492-12-babu.moger@xxxxxxx
---
 arch/x86/kernel/cpu/resctrl/core.c        | 69 ++++++++++++++++++++++++++++--
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 71 +++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/resctrl/internal.h    |  5 +++
 3 files changed, 142 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index ba5a5b8c4681..2ec252be4ed9 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -61,6 +61,9 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
 		struct rdt_resource *r);
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+	      struct rdt_resource *r);
 
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
 
@@ -255,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 	return false;
 }
 
-static bool __get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
 {
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
@@ -281,6 +284,30 @@ static bool __get_mem_config(struct rdt_resource *r)
 	return true;
 }
 
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->default_ctrl = MAX_MBA_BW_AMD;
+
+	/* AMD does not use delay */
+	r->membw.delay_linear = false;
+
+	r->membw.min_bw = 0;
+	r->membw.bw_gran = 1;
+	/* Max value is 2048, Data width should be 4 in decimal */
+	r->data_width = 4;
+
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
+
+	return true;
+}
+
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -340,6 +367,15 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	unsigned int i;
+
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
 /*
  * Map the memory b/w percentage value to delay values
  * that can be written to QOS_MSRs.
@@ -793,8 +829,13 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
-	if (rdt_cpu_has(X86_FEATURE_MBA))
-		return __get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	if (!rdt_cpu_has(X86_FEATURE_MBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
 
 	return false;
 }
@@ -893,10 +934,32 @@ static __init void rdt_init_res_defs_intel(void)
 	}
 }
 
+static __init void rdt_init_res_defs_amd(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_amd;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_BW_BASE;
+			r->msr_update = mba_wrmsr_amd;
+			r->parse_ctrlval = parse_bw_amd;
+		}
+	}
+}
+
 static __init void rdt_init_res_defs(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		rdt_init_res_defs_intel();
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		rdt_init_res_defs_amd();
 }
 
 static enum cpuhp_state rdt_online;
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index bfd7bdf8a156..43ee3cee6494 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -28,6 +28,53 @@
 #include <linux/slab.h>
 #include "internal.h"
 
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+			    struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d)
+{
+	unsigned long bw_val;
+
+	if (d->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate_amd(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	d->new_ctrl = bw_val;
+	d->have_new_ctrl = true;
+
+	return 0;
+}
+
 /*
  * Check whether MBA bandwidth percentage value is correct. The value is
  * checked against the minimum and max bandwidth values specified by the
@@ -123,6 +170,30 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 	return true;
 }
 
+/*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret) {
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+		return false;
+	}
+
+	if (val > r->default_ctrl) {
+		rdt_last_cmd_puts("Mask out of range\n");
+		return false;
+	}
+
+	*data = val;
+	return true;
+}
+
 /*
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 599cad34a6a8..822b7db634ee 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -11,6 +11,7 @@
 #define MSR_IA32_L3_CBM_BASE		0xc90
 #define MSR_IA32_L2_CBM_BASE		0xd10
 #define MSR_IA32_MBA_THRTL_BASE		0xd50
+#define MSR_IA32_MBA_BW_BASE		0xc0000200
 
 #define MSR_IA32_QM_CTR			0x0c8e
 #define MSR_IA32_QM_EVTSEL		0x0c8d
@@ -34,6 +35,7 @@
 #define MAX_MBA_BW			100u
 #define MBA_IS_LINEAR			0x4
 #define MBA_MAX_MBPS			U32_MAX
+#define MAX_MBA_BW_AMD			0x800
 
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)
@@ -448,6 +450,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	      struct rdt_domain *d);
 int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
 		   struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -579,5 +583,6 @@ void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
 bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */