From: Tan Xiaojun <tanxiaojun(a)huawei.com>
commit a7240fe505d3e49dbe6eb2c67deef7dd1915911f openEuler-1.0
hulk inclusion
category: feature
bugzilla: 5510
CVE: NA
Split out from Xiaojun's original combined patch.
Signed-off-by: Tan Xiaojun <tanxiaojun(a)huawei.com>
Signed-off-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Signed-off-by: Xin Hao <haoxing990(a)gmail.com>
---
arch/arm64/include/asm/mpam_sched.h | 60 +++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/arch/arm64/include/asm/mpam_sched.h b/arch/arm64/include/asm/mpam_sched.h
index 5978e98212b9..02aea183d6ec 100644
--- a/arch/arm64/include/asm/mpam_sched.h
+++ b/arch/arm64/include/asm/mpam_sched.h
@@ -1,9 +1,13 @@
#ifndef _ASM_ARM64_MPAM_SCHED_H
#define _ASM_ARM64_MPAM_SCHED_H
+#ifdef CONFIG_MPAM
+
#include <linux/sched.h>
#include <linux/jump_label.h>
+#include <asm/mpam.h>
+
/**
* struct intel_pqr_state - State cache for the PQR MSR
* @cur_rmid: The cached Resource Monitoring ID
@@ -28,6 +32,62 @@ struct intel_pqr_state {
DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+/*
+ * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ *
+ * Following considerations are made so that this has minimal impact
+ * on scheduler hot path:
+ * - This will stay as no-op unless we are running on an Intel SKU
+ * which supports resource control or monitoring and we enable by
+ * mounting the resctrl file system.
+ * - Caches the per cpu CLOSid/RMID values and does the MSR write only
+ * when a task with a different CLOSid/RMID is scheduled in.
+ * - We allocate RMIDs/CLOSids globally in order to keep this as
+ * simple as possible.
+ * Must be called with preemption disabled.
+ */
+static void __mpam_sched_in(void)
+{
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ u32 partid = state->default_closid;
+ u32 pmg = state->default_rmid;
+
+ /*
+ * If this task has a closid/rmid assigned, use it.
+ * Else use the closid/rmid assigned to this cpu.
+ */
+ if (static_branch_likely(&resctrl_alloc_enable_key)) {
+ if (current->closid)
+ partid = current->closid;
+ }
+
+ if (static_branch_likely(&resctrl_mon_enable_key)) {
+ if (current->rmid)
+ pmg = current->rmid;
+ }
+
+ if (partid != state->cur_closid || pmg != state->cur_rmid) {
+ u64 reg;
+ state->cur_closid = partid;
+ state->cur_rmid = pmg;
+
+ reg = read_sysreg_s(SYS_MPAM0_EL1);
+ reg = reg & (~PARTID_MASK) & partid;
+ reg = reg & (~PMG_MASK) & pmg;
+ write_sysreg_s(reg, SYS_MPAM0_EL1);
+ }
+}
+
+static inline void mpam_sched_in(void)
+{
+	if (static_branch_likely(&resctrl_enable_key)) /* no-op until resctrl is mounted */
+		__mpam_sched_in();
+}
+
+#else	/* !CONFIG_MPAM */
+
static inline void mpam_sched_in(void) {}
+#endif /* CONFIG_MPAM */
+
#endif
--
2.31.0