/* SPDX-License-Identifier: MIT
*
* xen.h
*
* Guest OS interface to Xen.
*
* Copyright (c) 2004, K A Fraser
*/
#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__

#include <xen/arm/interface.h>

/*
* XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
*/
/*
* x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
* EAX = return value
* (argument registers may be clobbered on return)
* x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
* RAX = return value
* (argument registers not clobbered on return; RCX, R11 are)
*/
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_xenpmu_op 40
#define __HYPERVISOR_dm_op 41
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
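
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch only, not part of this interface: how a guest might use
 * one of the hypercall numbers above. The 'hypercall2' parameter stands in for
 * an architecture-specific trap primitive (deliberately not defined by this
 * header) that follows the register ABI documented at the top of this file.
 * The argument value 0 is XENVER_version from xen/interface/version.h; the
 * call returns the hypervisor version packed as (major << 16) | minor.
 */
static inline unsigned int xen_example_version(long (*hypercall2)(unsigned int nr,
								  unsigned long a1,
								  unsigned long a2))
{
	return (unsigned int)hypercall2(__HYPERVISOR_xen_version,
					0 /* XENVER_version */, 0);
}
#endif /* !__ASSEMBLY__ */
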
#ifndef __ASSEMBLY__
typedef u16 domid_t;
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)
/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)
/*
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.
* Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
* is useful to ensure that no mappings to the OS's own heap are accidentally
* installed. (e.g., in Linux this could cause havoc as reference counts
* aren't adjusted on the I/O-mapping code path).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
* be specified by any calling domain.
*/
#define DOMID_IO (0x7FF1U)
/*
* DOMID_XEN is used to allow privileged domains to map restricted parts of
* Xen's heap space (e.g., the machine_to_phys table).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
* the caller is privileged.
*/
#define DOMID_XEN (0x7FF2U)
/* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)
/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)
/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
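
/*
 * Illustrative sketch only, not part of this interface: because every reserved
 * ID above sits at or beyond DOMID_FIRST_RESERVED, a guest or toolstack can
 * check whether an ID names an ordinary domain with a single comparison.
 */
static inline int xen_example_domid_is_ordinary(domid_t domid)
{
	return domid < DOMID_FIRST_RESERVED;
}
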
struct vcpu_info {
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
* a pending notification for a particular VCPU. It is then cleared
* by the guest OS /before/ checking for pending work, thus avoiding
* a set-and-check race. Note that the mask is only accessed by Xen
* on the CPU that is currently hosting the VCPU. This means that the
* pending and mask flags can be updated by the guest without special
* synchronisation (i.e., no need for the x86 LOCK prefix).
* This may seem suboptimal because if the pending flag is set by
* a different CPU then an IPI may be scheduled even when the mask
* is set. However, note:
* 1. The task of 'interrupt holdoff' is covered by the per-event-
* channel mask bits. A 'noisy' event that is continually being
* triggered can be masked at source at this very precise
* granularity.
* 2. The main purpose of the per-VCPU mask is therefore to restrict
* reentrant execution: whether for concurrency control, or to
* prevent unbounded stack usage. Whatever the purpose, we expect
* that the mask will be asserted only for short periods at a time,
* and so the likelihood of a 'spurious' IPI is suitably small.
* The mask is read before making an event upcall to the guest: a
* non-zero mask therefore guarantees that the VCPU will not receive
* an upcall activation. The mask is cleared when the VCPU requests
* to block: this avoids wakeup-waiting races.
*/
u8 evtchn_upcall_pending;
u8 evtchn_upcall_mask;
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
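
/*
 * Illustrative sketch only, not part of this interface: one way a guest's
 * event upcall entry point might drive the flags above. 'scan_pending' stands
 * in for the guest's own event-channel scan (see struct shared_info below).
 * The pending flag is cleared /before/ looking for work, as the comment in
 * struct vcpu_info requires, so a notification arriving mid-scan forces
 * another pass instead of being lost. A real implementation also needs the
 * appropriate memory barriers, which are omitted here for brevity.
 */
static inline void xen_example_upcall(struct vcpu_info *v,
				      void (*scan_pending)(struct vcpu_info *v))
{
	while (v->evtchn_upcall_pending) {
		v->evtchn_upcall_pending = 0;
		scan_pending(v);
	}
}
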
/*
* Xen/kernel shared data -- pointer provided in start_info.
* NB. We expect that this struct is smaller than a page.
*/
struct shared_info {
struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
/*
* A domain can create "event channels" on which it can send and receive
* asynchronous event notifications. There are three classes of event that
* are delivered by this mechanism:
* 1. Bi-directional inter- and intra-domain connections. Domains must
* arrange out-of-band to set up a connection (usually by allocating
* an unbound 'listener' port and advertising that via a storage service
* such as xenstore).
* 2. Physical interrupts. A domain with suitable hardware-access
* privileges can bind an event-channel port to a physical interrupt
* source.
* 3. Virtual interrupts ('events'). A domain can bind an event-channel
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
* Event channels are addressed by a "port index". Each channel is
* associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
* to be processed. This bit is cleared by the guest.
* 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
* will cause an asynchronous upcall to be scheduled. This bit is only
* updated by the guest. It is read-only within Xen. If a channel
* becomes pending while the channel is masked then the 'edge' is lost
* (i.e., when the channel is unmasked, the guest must manually handle
* pending notifications as no upcall will be scheduled by Xen).
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
* their gettimeofday() syscall on this wallclock-base value.
*/
struct pvclock_wall_clock wc;
struct arch_shared_info arch;
};
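
/*
 * Illustrative sketch only, not part of this interface: a minimal two-level
 * scan over the event-channel bitmaps above, following the selector scheme
 * described in the comment inside struct shared_info. 'handle_port' stands in
 * for the guest's per-port handler. A real implementation uses atomic bit
 * operations and memory barriers (Xen may set selector and pending bits
 * concurrently); those details are omitted here for brevity.
 */
static inline void xen_example_scan_events(struct shared_info *s,
					   struct vcpu_info *v,
					   void (*handle_port)(unsigned int port))
{
	const unsigned int bits_per_word = sizeof(xen_ulong_t) * 8;
	xen_ulong_t sel = v->evtchn_pending_sel;
	unsigned int word, bit;

	v->evtchn_pending_sel = 0;
	for (word = 0; word < bits_per_word; word++) {
		xen_ulong_t pending;

		if (!(sel & ((xen_ulong_t)1 << word)))
			continue;
		/* Masked channels stay pending but generate no upcall. */
		pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
		for (bit = 0; bit < bits_per_word; bit++) {
			if (!(pending & ((xen_ulong_t)1 << bit)))
				continue;
			s->evtchn_pending[word] &= ~((xen_ulong_t)1 << bit);
			handle_port(word * bits_per_word + bit);
		}
	}
}
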
#else /* __ASSEMBLY__ */
/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x
#endif /* !__ASSEMBLY__ */
#endif /* __XEN_PUBLIC_XEN_H__ */