1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /* |
3 | * Copyright 2021 VMware, Inc., Palo Alto, CA., USA |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice (including the |
14 | * next paragraph) shall be included in all copies or substantial portions |
15 | * of the Software. |
16 | * |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
20 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
21 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
22 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
23 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
24 | * |
25 | */ |
26 | #ifndef _VMWGFX_MSG_ARM64_H |
27 | #define _VMWGFX_MSG_ARM64_H |
28 | |
29 | #if defined(__aarch64__) |
30 | |
/*
 * Legacy x86 I/O port numbers of the VMware "backdoor" channel.  On arm64
 * there is no in/out instruction; the port number is instead passed to the
 * hypervisor in a register (x3) by the wrappers below.
 */
#define VMWARE_HYPERVISOR_PORT 0x5658
#define VMWARE_HYPERVISOR_PORT_HB 0x5659

/* Flag bits OR'ed into the port value. */
#define VMWARE_HYPERVISOR_HB BIT(0)
#define VMWARE_HYPERVISOR_OUT BIT(1)

/*
 * Magic placed in the upper 32 bits of x7 (see vmw_port/vmw_port_hb) to
 * mark the low word as a descriptor of the emulated x86 I/O instruction.
 */
#define X86_IO_MAGIC 0x86

/* Fields of the w7 descriptor (low word of x7): */
#define X86_IO_W7_SIZE_SHIFT 0	/* 2-bit operand-size field */
#define X86_IO_W7_SIZE_MASK (0x3 << X86_IO_W7_SIZE_SHIFT)
#define X86_IO_W7_DIR (1 << 2)	/* set = IN, clear = OUT (see VMW_PORT_HB_IN/_OUT) */
#define X86_IO_W7_WITH (1 << 3)	/* presumably "with port operand" — TODO confirm */
#define X86_IO_W7_STR (1 << 4)	/* presumably string (ins/outs) form; set only on the HB path */
#define X86_IO_W7_DF (1 << 5)
#define X86_IO_W7_IMM_SHIFT 5	/* NOTE(review): overlaps the DF bit — confirm intended */
#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
47 | |
/*
 * vmw_port - issue a low-bandwidth VMware hypercall on arm64.
 *
 * Emulates the x86 backdoor call: the six x86 register values travel in
 * x0-x5, and x7 carries an X86_IO_MAGIC-tagged descriptor of the emulated
 * I/O instruction (direction = IN, "with" form, size field = 2 —
 * presumably 32-bit operands, matching the x86 "in %eax" backdoor; TODO
 * confirm the size encoding).  The read of mdccsr_el0 is the trap the
 * hypervisor intercepts to service the call; results come back in x0-x5.
 *
 * @cmd:	hypercall command number (x86 %ecx slot, x2)
 * @in_ebx:	input value for the %ebx slot (x1)
 * @in_si:	input value for the %esi slot (x4)
 * @in_di:	input value for the %edi slot (x5)
 * @flags:	extra bits OR'ed into the port number in x3
 * @magic:	hypervisor magic value (%eax slot, x0)
 * @eax, @ebx, @ecx, @edx, @si, @di: outputs; receive the register values
 *	returned by the hypervisor.  All six are written unconditionally.
 */
static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
			    unsigned long in_si, unsigned long in_di,
			    unsigned long flags, unsigned long magic,
			    unsigned long *eax, unsigned long *ebx,
			    unsigned long *ecx, unsigned long *edx,
			    unsigned long *si, unsigned long *di)
{
	/* Pin each argument to the exact register the hypervisor inspects. */
	register u64 x0 asm("x0" ) = magic;
	register u64 x1 asm("x1" ) = in_ebx;
	register u64 x2 asm("x2" ) = cmd;
	register u64 x3 asm("x3" ) = flags | VMWARE_HYPERVISOR_PORT;
	register u64 x4 asm("x4" ) = in_si;
	register u64 x5 asm("x5" ) = in_di;

	/* Descriptor of the emulated x86 instruction: IN, "with" form. */
	register u64 x7 asm("x7" ) = ((u64)X86_IO_MAGIC << 32) |
				     X86_IO_W7_WITH |
				     X86_IO_W7_DIR |
				     (2 << X86_IO_W7_SIZE_SHIFT);

	/* Trapped by the hypervisor; x0-x5 are updated in place. */
	asm volatile("mrs xzr, mdccsr_el0 \n\t"
		     : "+r" (x0), "+r" (x1), "+r" (x2),
		       "+r" (x3), "+r" (x4), "+r" (x5)
		     : "r" (x7)
		     :);
	*eax = x0;
	*ebx = x1;
	*ecx = x2;
	*edx = x3;
	*si = x4;
	*di = x5;
}
79 | |
80 | static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx, |
81 | unsigned long in_si, unsigned long in_di, |
82 | unsigned long flags, unsigned long magic, |
83 | unsigned long bp, u32 w7dir, |
84 | unsigned long *eax, unsigned long *ebx, |
85 | unsigned long *ecx, unsigned long *edx, |
86 | unsigned long *si, unsigned long *di) |
87 | { |
88 | register u64 x0 asm("x0" ) = magic; |
89 | register u64 x1 asm("x1" ) = cmd; |
90 | register u64 x2 asm("x2" ) = in_ecx; |
91 | register u64 x3 asm("x3" ) = flags | VMWARE_HYPERVISOR_PORT_HB; |
92 | register u64 x4 asm("x4" ) = in_si; |
93 | register u64 x5 asm("x5" ) = in_di; |
94 | register u64 x6 asm("x6" ) = bp; |
95 | register u64 x7 asm("x7" ) = ((u64)X86_IO_MAGIC << 32) | |
96 | X86_IO_W7_STR | |
97 | X86_IO_W7_WITH | |
98 | w7dir; |
99 | |
100 | asm volatile("mrs xzr, mdccsr_el0 \n\t" |
101 | : "+r" (x0), "+r" (x1), "+r" (x2), |
102 | "+r" (x3), "+r" (x4), "+r" (x5) |
103 | : "r" (x6), "r" (x7) |
104 | :); |
105 | *eax = x0; |
106 | *ebx = x1; |
107 | *ecx = x2; |
108 | *edx = x3; |
109 | *si = x4; |
110 | *di = x5; |
111 | } |
112 | |
/*
 * VMW_PORT - low-bandwidth hypercall; all data passes in registers.
 * The output arguments are plain lvalues — their addresses are taken here,
 * matching the pointer-based signature of vmw_port().
 */
#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
		 si, di) \
	vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
		 &edx, &si, &di)

/*
 * VMW_PORT_HB_OUT - high-bandwidth transfer, guest -> host (direction bit
 * clear in the w7 descriptor).
 */
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
			ecx, edx, si, di) \
	vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
		    0, &eax, &ebx, &ecx, &edx, &si, &di)

/*
 * VMW_PORT_HB_IN - high-bandwidth transfer, host -> guest (X86_IO_W7_DIR
 * set in the w7 descriptor).
 */
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
		       ecx, edx, si, di) \
	vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
		    X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
127 | |
128 | #endif |
129 | |
130 | #endif /* _VMWGFX_MSG_ARM64_H */ |
131 | |