[PATCH 03/11] arm64: gunyah: Add Gunyah hypercalls ABI

From: Elliot Berman
Date: Wed Feb 23 2022 - 18:38:19 EST


Add initial support for performing Gunyah hypercalls. The arm64 ABI for
Gunyah hypercalls generally follows AAPCS64 and can be summarized:
- The function identifier is passed through the imm operand of the HVC
  instruction
- [r0-r7] are parameter and result registers
- [r8-r18] are temporary and saved by the caller (VM)
- [r19-r31] are preserved and saved by the hypervisor

The preprocessor macros for creating the necessary HVC instruction
roughly follow the SMCCC 1.1 implementation in
include/linux/arm-smccc.h.
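
As a rough usage sketch (the function ID 0x6000 and the argument values
here are made up for illustration), a hypercall with one input and two
outputs can be issued as:

	uintptr_t resp0, resp1;

	/* input 0 is placed in r0; results come back in r0 and r1 */
	arch_gh_hypercall(0x6000, 1, 0, resp0, resp1);

Note that the function identifier is encoded via an "i" constraint, so
hcall_num must be a compile-time constant.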

Signed-off-by: Elliot Berman <quic_eberman@xxxxxxxxxxx>
---
MAINTAINERS | 1 +
arch/arm64/include/asm/gunyah/hypercall.h | 207 ++++++++++++++++++++++
2 files changed, 208 insertions(+)
create mode 100644 arch/arm64/include/asm/gunyah/hypercall.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 6a918f653eac..7e6a8488fa3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8402,6 +8402,7 @@ L: linux-arm-msm@xxxxxxxxxxxxxxx
S: Maintained
F: Documentation/devicetree/bindings/gunyah/
F: Documentation/virt/gunyah/
+F: arch/arm64/include/asm/gunyah/

H8/300 ARCHITECTURE
M: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
diff --git a/arch/arm64/include/asm/gunyah/hypercall.h b/arch/arm64/include/asm/gunyah/hypercall.h
new file mode 100644
index 000000000000..626163500e32
--- /dev/null
+++ b/arch/arm64/include/asm/gunyah/hypercall.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __ASM_GH_HYPERCALL_H
+#define __ASM_GH_HYPERCALL_H
+
+#include <linux/types.h>
+
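+/* __gh_count_args() expands to the number of arguments passed, from 0 to 8. */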
+#define ___gh_count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __gh_count_args(...) \
+ ___gh_count_args(_, ## __VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
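+/* __gh_skip_<n>() drops the first <n> (input) arguments, keeping the rest. */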
+#define __gh_skip_0(...) __VA_ARGS__
+#define __gh_skip_1(a, ...) __VA_ARGS__
+#define __gh_skip_2(a, b, ...) __VA_ARGS__
+#define __gh_skip_3(a, b, c, ...) __VA_ARGS__
+#define __gh_skip_4(a, b, c, d, ...) __VA_ARGS__
+#define __gh_skip_5(a, b, c, d, e, ...) __VA_ARGS__
+#define __gh_skip_6(a, b, c, d, e, f, ...) __VA_ARGS__
+#define __gh_skip_7(a, b, c, d, e, f, g, ...) __VA_ARGS__
+#define __gh_skip_8(a, b, c, d, e, f, g, h, ...) __VA_ARGS__
+
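+/* Bind up to 8 input arguments to the parameter registers r0-r7. */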
+#define __gh_declare_arg_0(...)
+
+#define __gh_declare_arg_1(a1, ...) \
+ typeof(a1) __gh_a1 = (a1); \
+ register uintptr_t arg1 asm("r0") = __gh_a1
+
+#define __gh_declare_arg_2(a1, a2, ...) \
+ __gh_declare_arg_1(a1); \
+ typeof(a2) __gh_a2 = (a2); \
+ register uintptr_t arg2 asm("r1") = __gh_a2
+
+#define __gh_declare_arg_3(a1, a2, a3, ...) \
+ __gh_declare_arg_2(a1, a2); \
+ typeof(a3) __gh_a3 = (a3); \
+ register uintptr_t arg3 asm("r2") = __gh_a3
+
+#define __gh_declare_arg_4(a1, a2, a3, a4, ...) \
+ __gh_declare_arg_3(a1, a2, a3); \
+ typeof(a4) __gh_a4 = (a4); \
+ register uintptr_t arg4 asm("r3") = __gh_a4
+
+#define __gh_declare_arg_5(a1, a2, a3, a4, a5, ...) \
+ __gh_declare_arg_4(a1, a2, a3, a4); \
+ typeof(a5) __gh_a5 = (a5); \
+ register uintptr_t arg5 asm("r4") = __gh_a5
+
+#define __gh_declare_arg_6(a1, a2, a3, a4, a5, a6, ...) \
+ __gh_declare_arg_5(a1, a2, a3, a4, a5); \
+ typeof(a6) __gh_a6 = (a6); \
+ register uintptr_t arg6 asm("r5") = __gh_a6
+
+#define __gh_declare_arg_7(a1, a2, a3, a4, a5, a6, a7, ...) \
+ __gh_declare_arg_6(a1, a2, a3, a4, a5, a6); \
+ typeof(a7) __gh_a7 = (a7); \
+ register uintptr_t arg7 asm("r6") = __gh_a7
+
+#define __gh_declare_arg_8(a1, a2, a3, a4, a5, a6, a7, a8, ...) \
+ __gh_declare_arg_7(a1, a2, a3, a4, a5, a6, a7); \
+ typeof(a8) __gh_a8 = (a8); \
+ register uintptr_t arg8 asm("r7") = __gh_a8
+
+#define ___gh_declare_args(nargs) __gh_declare_arg_ ## nargs
+#define __gh_declare_args(nargs) ___gh_declare_args(nargs)
+#define _gh_declare_args(nargs, ...) __gh_declare_args(nargs)(__VA_ARGS__)
+
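+/* Build the inline asm input constraint list for the declared arguments. */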
+#define __gh_constraint_arg_0
+#define __gh_constraint_arg_1 "r" (arg1),
+#define __gh_constraint_arg_2 __gh_constraint_arg_1 "r" (arg2),
+#define __gh_constraint_arg_3 __gh_constraint_arg_2 "r" (arg3),
+#define __gh_constraint_arg_4 __gh_constraint_arg_3 "r" (arg4),
+#define __gh_constraint_arg_5 __gh_constraint_arg_4 "r" (arg5),
+#define __gh_constraint_arg_6 __gh_constraint_arg_5 "r" (arg6),
+#define __gh_constraint_arg_7 __gh_constraint_arg_6 "r" (arg7),
+#define __gh_constraint_arg_8 __gh_constraint_arg_7 "r" (arg8),
+
+#define _gh_constraint_args(nargs) __gh_constraint_arg_ ## nargs
+
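+/* __gh_to_res() selects the output arguments following the nargs inputs. */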
+#define __gh_to_res(nargs, ...) __gh_skip_ ## nargs (__VA_ARGS__)
+
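+/* Bind the result registers r0-r7 to local variables. */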
+#define __gh_declare_res_0
+
+#define __gh_declare_res_1 \
+ register uintptr_t res1 asm("r0")
+
+#define __gh_declare_res_2 \
+ __gh_declare_res_1; \
+ register uintptr_t res2 asm("r1")
+
+#define __gh_declare_res_3 \
+ __gh_declare_res_2; \
+ register uintptr_t res3 asm("r2")
+
+#define __gh_declare_res_4 \
+ __gh_declare_res_3; \
+ register uintptr_t res4 asm("r3")
+
+#define __gh_declare_res_5 \
+ __gh_declare_res_4; \
+ register uintptr_t res5 asm("r4")
+
+#define __gh_declare_res_6 \
+ __gh_declare_res_5; \
+ register uintptr_t res6 asm("r5")
+
+#define __gh_declare_res_7 \
+ __gh_declare_res_6; \
+ register uintptr_t res7 asm("r6")
+
+#define __gh_declare_res_8 \
+ __gh_declare_res_7; \
+ register uintptr_t res8 asm("r7")
+
+#define ___gh_declare_res(nargs) __gh_declare_res_ ## nargs
+#define __gh_declare_res(nargs) ___gh_declare_res(nargs)
+#define _gh_declare_res(...) __gh_declare_res(__gh_count_args(__VA_ARGS__))
+
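+/* Build the inline asm output constraint list for the declared results. */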
+#define __gh_constraint_res_0
+#define __gh_constraint_res_1 "=r" (res1)
+#define __gh_constraint_res_2 __gh_constraint_res_1, "=r" (res2)
+#define __gh_constraint_res_3 __gh_constraint_res_2, "=r" (res3)
+#define __gh_constraint_res_4 __gh_constraint_res_3, "=r" (res4)
+#define __gh_constraint_res_5 __gh_constraint_res_4, "=r" (res5)
+#define __gh_constraint_res_6 __gh_constraint_res_5, "=r" (res6)
+#define __gh_constraint_res_7 __gh_constraint_res_6, "=r" (res7)
+#define __gh_constraint_res_8 __gh_constraint_res_7, "=r" (res8)
+
+#define ___gh_constraint_res(nargs) __gh_constraint_res_ ## nargs
+#define __gh_constraint_res(nargs) ___gh_constraint_res(nargs)
+#define _gh_constraint_res(...) \
+ __gh_constraint_res(__gh_count_args(__VA_ARGS__))
+
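+/* Copy the result registers back into the caller-provided lvalues. */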
+#define __gh_assign_res_0(...)
+
+#define __gh_assign_res_1(r1) \
+ r1 = res1
+
+#define __gh_assign_res_2(r1, r2) \
+ __gh_assign_res_1(r1); \
+ r2 = res2
+
+#define __gh_assign_res_3(r1, r2, r3) \
+ __gh_assign_res_2(r1, r2); \
+ r3 = res3
+
+#define __gh_assign_res_4(r1, r2, r3, r4) \
+ __gh_assign_res_3(r1, r2, r3); \
+ r4 = res4
+
+#define __gh_assign_res_5(r1, r2, r3, r4, r5) \
+ __gh_assign_res_4(r1, r2, r3, r4); \
+ r5 = res5
+
+#define __gh_assign_res_6(r1, r2, r3, r4, r5, r6) \
+ __gh_assign_res_5(r1, r2, r3, r4, r5); \
+ r6 = res6
+
+#define __gh_assign_res_7(r1, r2, r3, r4, r5, r6, r7) \
+ __gh_assign_res_6(r1, r2, r3, r4, r5, r6); \
+ r7 = res7
+
+#define __gh_assign_res_8(r1, r2, r3, r4, r5, r6, r7, r8) \
+ __gh_assign_res_7(r1, r2, r3, r4, r5, r6, r7); \
+ r8 = res8
+
+#define ___gh_assign_res(nargs) __gh_assign_res_ ## nargs
+#define __gh_assign_res(nargs) ___gh_assign_res(nargs)
+#define _gh_assign_res(...) __gh_assign_res(__gh_count_args(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+ * arch_gh_hypercall() - Perform an AArch64-specific call into the hypervisor using the Gunyah ABI
+ * @hcall_num: Hypercall function ID to invoke
+ * @nargs: Number of input arguments
+ * @...: First nargs are the input arguments. Remaining arguments are output variables.
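+ *
+ * Example (hypothetical function ID; one input, two outputs):
+ *
+ *	uintptr_t resp0, resp1;
+ *
+ *	arch_gh_hypercall(0x6000, 1, 0, resp0, resp1);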
+ */
+#define arch_gh_hypercall(hcall_num, nargs, ...) \
+ do { \
+ _gh_declare_res(__gh_to_res(nargs, __VA_ARGS__)); \
+ _gh_declare_args(nargs, __VA_ARGS__); \
+ asm volatile( \
+ "hvc %[num]\n" \
+ : _gh_constraint_res(__gh_to_res(nargs, __VA_ARGS__)) \
+ : _gh_constraint_args(nargs) \
+ [num] "i" (hcall_num) \
+ : "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \
+ "memory"); \
+ _gh_assign_res(__gh_to_res(nargs, __VA_ARGS__)); \
+ } while (0)
+
+#endif
--
2.25.1