/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019
 *
 * Author(s):
 *	Vasily Gorbik <gor@linux.ibm.com>
 *	Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>

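/* Return codes of Ultravisor calls, reported in struct uv_cb_header rc */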
#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
#define UVC_RC_INV_LEN		0x0005
#define UVC_RC_NO_RESUME	0x0007
#define UVC_RC_NEED_DESTROY	0x8000

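/* Ultravisor command codes, passed in struct uv_cb_header cmd */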
#define UVC_CMD_QUI			0x0001
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_DESTR_SEC_STOR		0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001

/* Bits in installed uv calls */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};

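/* Bits in uv_feature_indications (see struct uv_cb_qui and struct uv_info) */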
enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
};

struct uv_cb_header {
	u16 len;
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;
	u64 reserved08;
	u64 inst_calls_list[4];
	u64 reserved30[2];
	u64 uv_base_stor_len;
	u64 reserved48;
	u64 conf_base_phys_stor_len;
	u64 conf_base_virt_stor_len;
	u64 conf_virt_var_stor_len;
	u64 cpu_stor_len;
	u32 reserved70[3];
	u32 max_num_sec_conf;
	u64 max_guest_stor_addr;
	u8  reserved88[158 - 136];
	u16 max_guest_cpu_id;
	u64 uv_feature_indications;
	u8  reserveda0[200 - 168];
} __packed __aligned(8);

/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;
	u64 guest_asce;
	u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 guest_handle;
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR	1
#define PV_CPU_STATE_STP	2
#define PV_CPU_STATE_CHKSTP	3
#define PV_CPU_STATE_OPR_LOAD	5

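/* Set CPU state of a secure CPU (UVC_CMD_CPU_SET_STATE) */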
struct uv_cb_cpu_set_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u8  reserved20[7];
	u8  state;
	u64 reserved28[5];
};

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[4];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);

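/*
 * Perform one Ultravisor call. As used by the wrappers below, r2 carries
 * the address of the UV call control block (struct uv_cb_header plus the
 * command specific payload); the instruction's condition code is returned
 * and callers retry while cc > 1.
 */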
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}

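/* Ultravisor call that transparently retries the UV call while cc > 1 is returned */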
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}

/* Low level uv_call that avoids stalls for long running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

/*
 * special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
	struct uv_cb_nodata uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EINVAL : 0;
}
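
/*
 * Illustrative only (caller-side sketch, not a definition in this header):
 * with a secure CPU handle at hand, a destroy request would look roughly like
 *
 *	u16 rc, rrc;
 *	int ret = uv_cmd_nodata(cpu_handle, UVC_CMD_DESTROY_SEC_CPU, &rc, &rrc);
 */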

struct uv_info {
	unsigned long inst_calls_list[4];
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;
};

extern struct uv_info uv_info;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}

static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page wise; if we encounter addresses that are
	 * not page aligned, we assume something went wrong. If
	 * malloc'ed structs are passed to this function, we could leak
	 * data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	if (!uv_call(0, (u64)&uvcb))
		return 0;
	return -EINVAL;
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
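
/*
 * Illustrative sketch (assumes a page aligned, page sized buffer "buf" and
 * hypothetical error handling): a protected guest that wants the hypervisor
 * to access a buffer for I/O shares the page first and unshares it when done:
 *
 *	if (uv_set_shared(__pa(buf)))
 *		goto out_err;
 *	... perform the I/O ...
 *	uv_remove_shared(__pa(buf));
 */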

#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif

#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
void adjust_to_uv_max(unsigned long *vmax);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

static inline int uv_destroy_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif

#endif /* _ASM_S390_UV_H */