xref: /OK3568_Linux_fs/kernel/arch/powerpc/platforms/powernv/vas-window.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2016-17 IBM Corp.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #define pr_fmt(fmt) "vas: " fmt
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/types.h>
9*4882a593Smuzhiyun #include <linux/mutex.h>
10*4882a593Smuzhiyun #include <linux/slab.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/log2.h>
13*4882a593Smuzhiyun #include <linux/rcupdate.h>
14*4882a593Smuzhiyun #include <linux/cred.h>
15*4882a593Smuzhiyun #include <linux/sched/mm.h>
16*4882a593Smuzhiyun #include <linux/mmu_context.h>
17*4882a593Smuzhiyun #include <asm/switch_to.h>
18*4882a593Smuzhiyun #include <asm/ppc-opcode.h>
19*4882a593Smuzhiyun #include "vas.h"
20*4882a593Smuzhiyun #include "copy-paste.h"
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
23*4882a593Smuzhiyun #include "vas-trace.h"
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * Compute the paste address region for the window @window using the
27*4882a593Smuzhiyun  * ->paste_base_addr and ->paste_win_id_shift we got from device tree.
28*4882a593Smuzhiyun  */
vas_win_paste_addr(struct vas_window * window,u64 * addr,int * len)29*4882a593Smuzhiyun void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun 	int winid;
32*4882a593Smuzhiyun 	u64 base, shift;
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	base = window->vinst->paste_base_addr;
35*4882a593Smuzhiyun 	shift = window->vinst->paste_win_id_shift;
36*4882a593Smuzhiyun 	winid = window->winid;
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	*addr  = base + (winid << shift);
39*4882a593Smuzhiyun 	if (len)
40*4882a593Smuzhiyun 		*len = PAGE_SIZE;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
/*
 * Return the start address and length of the Hypervisor Window Context
 * (HVWC) MMIO region for @window. Each window owns a fixed-size slot,
 * indexed by window id, within the instance's HVWC BAR.
 */
static inline void get_hvwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	*start = window->vinst->hvwc_bar_start +
			window->winid * VAS_HVWC_SIZE;
	*len = VAS_HVWC_SIZE;
}
54*4882a593Smuzhiyun 
/*
 * Return the start address and length of the OS/User Window Context
 * (UWC) MMIO region for @window. Each window owns a fixed-size slot,
 * indexed by window id, within the instance's UWC BAR.
 */
static inline void get_uwc_mmio_bar(struct vas_window *window,
			u64 *start, int *len)
{
	*start = window->vinst->uwc_bar_start +
			window->winid * VAS_UWC_SIZE;
	*len = VAS_UWC_SIZE;
}
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /*
66*4882a593Smuzhiyun  * Map the paste bus address of the given send window into kernel address
67*4882a593Smuzhiyun  * space. Unlike MMIO regions (map_mmio_region() below), paste region must
68*4882a593Smuzhiyun  * be mapped cache-able and is only applicable to send windows.
69*4882a593Smuzhiyun  */
map_paste_region(struct vas_window * txwin)70*4882a593Smuzhiyun static void *map_paste_region(struct vas_window *txwin)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	int len;
73*4882a593Smuzhiyun 	void *map;
74*4882a593Smuzhiyun 	char *name;
75*4882a593Smuzhiyun 	u64 start;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
78*4882a593Smuzhiyun 				txwin->winid);
79*4882a593Smuzhiyun 	if (!name)
80*4882a593Smuzhiyun 		goto free_name;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	txwin->paste_addr_name = name;
83*4882a593Smuzhiyun 	vas_win_paste_addr(txwin, &start, &len);
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (!request_mem_region(start, len, name)) {
86*4882a593Smuzhiyun 		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
87*4882a593Smuzhiyun 				__func__, start, len);
88*4882a593Smuzhiyun 		goto free_name;
89*4882a593Smuzhiyun 	}
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	map = ioremap_cache(start, len);
92*4882a593Smuzhiyun 	if (!map) {
93*4882a593Smuzhiyun 		pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__,
94*4882a593Smuzhiyun 				start, len);
95*4882a593Smuzhiyun 		goto free_name;
96*4882a593Smuzhiyun 	}
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
99*4882a593Smuzhiyun 	return map;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun free_name:
102*4882a593Smuzhiyun 	kfree(name);
103*4882a593Smuzhiyun 	return ERR_PTR(-ENOMEM);
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun 
/*
 * Reserve and ioremap() the MMIO region [@start, @start + @len) under
 * @name. Returns the mapped kernel address, or NULL on failure (callers
 * treat NULL as an error; see map_winctx_mmio_bars()).
 */
static void *map_mmio_region(char *name, u64 start, int len)
{
	void *map;

	if (!request_mem_region(start, len, name)) {
		pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
				__func__, start, len);
		return NULL;
	}

	map = ioremap(start, len);
	if (!map) {
		pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start,
				len);
		/* Release the region we reserved above - don't leak it. */
		release_mem_region(start, len);
		return NULL;
	}

	return map;
}
125*4882a593Smuzhiyun 
/*
 * Undo a prior map_mmio_region()/map_paste_region(): unmap the kernel
 * mapping @addr and release the reserved bus address range.
 */
static void unmap_region(void *addr, u64 start, int len)
{
	iounmap(addr);
	release_mem_region((phys_addr_t)start, len);
}
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun /*
133*4882a593Smuzhiyun  * Unmap the paste address region for a window.
134*4882a593Smuzhiyun  */
unmap_paste_region(struct vas_window * window)135*4882a593Smuzhiyun static void unmap_paste_region(struct vas_window *window)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun 	int len;
138*4882a593Smuzhiyun 	u64 busaddr_start;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	if (window->paste_kaddr) {
141*4882a593Smuzhiyun 		vas_win_paste_addr(window, &busaddr_start, &len);
142*4882a593Smuzhiyun 		unmap_region(window->paste_kaddr, busaddr_start, len);
143*4882a593Smuzhiyun 		window->paste_kaddr = NULL;
144*4882a593Smuzhiyun 		kfree(window->paste_addr_name);
145*4882a593Smuzhiyun 		window->paste_addr_name = NULL;
146*4882a593Smuzhiyun 	}
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun /*
150*4882a593Smuzhiyun  * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't
151*4882a593Smuzhiyun  * unmap when the window's debugfs dir is in use. This serializes close
152*4882a593Smuzhiyun  * of a window even on another VAS instance but since its not a critical
153*4882a593Smuzhiyun  * path, just minimize the time we hold the mutex for now. We can add
154*4882a593Smuzhiyun  * a per-instance mutex later if necessary.
155*4882a593Smuzhiyun  */
unmap_winctx_mmio_bars(struct vas_window * window)156*4882a593Smuzhiyun static void unmap_winctx_mmio_bars(struct vas_window *window)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	int len;
159*4882a593Smuzhiyun 	void *uwc_map;
160*4882a593Smuzhiyun 	void *hvwc_map;
161*4882a593Smuzhiyun 	u64 busaddr_start;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	mutex_lock(&vas_mutex);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	hvwc_map = window->hvwc_map;
166*4882a593Smuzhiyun 	window->hvwc_map = NULL;
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	uwc_map = window->uwc_map;
169*4882a593Smuzhiyun 	window->uwc_map = NULL;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	mutex_unlock(&vas_mutex);
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	if (hvwc_map) {
174*4882a593Smuzhiyun 		get_hvwc_mmio_bar(window, &busaddr_start, &len);
175*4882a593Smuzhiyun 		unmap_region(hvwc_map, busaddr_start, len);
176*4882a593Smuzhiyun 	}
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	if (uwc_map) {
179*4882a593Smuzhiyun 		get_uwc_mmio_bar(window, &busaddr_start, &len);
180*4882a593Smuzhiyun 		unmap_region(uwc_map, busaddr_start, len);
181*4882a593Smuzhiyun 	}
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /*
185*4882a593Smuzhiyun  * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the
186*4882a593Smuzhiyun  * OS/User Window Context (UWC) MMIO Base Address Region for the given window.
187*4882a593Smuzhiyun  * Map these bus addresses and save the mapped kernel addresses in @window.
188*4882a593Smuzhiyun  */
map_winctx_mmio_bars(struct vas_window * window)189*4882a593Smuzhiyun static int map_winctx_mmio_bars(struct vas_window *window)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun 	int len;
192*4882a593Smuzhiyun 	u64 start;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	get_hvwc_mmio_bar(window, &start, &len);
195*4882a593Smuzhiyun 	window->hvwc_map = map_mmio_region("HVWCM_Window", start, len);
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	get_uwc_mmio_bar(window, &start, &len);
198*4882a593Smuzhiyun 	window->uwc_map = map_mmio_region("UWCM_Window", start, len);
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	if (!window->hvwc_map || !window->uwc_map) {
201*4882a593Smuzhiyun 		unmap_winctx_mmio_bars(window);
202*4882a593Smuzhiyun 		return -1;
203*4882a593Smuzhiyun 	}
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	return 0;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun 
/*
 * Reset all valid registers in the HV and OS/User Window Contexts for
 * the window identified by @window.
 *
 * NOTE: We cannot really use a for loop to reset window context. Not all
 *	 offsets in a window context are valid registers and the valid
 *	 registers are not sequential. And, we can only write to offsets
 *	 with valid registers.
 */
static void reset_window_regs(struct vas_window *window)
{
	/* Translation / ownership context */
	write_hvwc_reg(window, VREG(LPID), 0ULL);
	write_hvwc_reg(window, VREG(PID), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL);
	write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(AMR), 0ULL);
	write_hvwc_reg(window, VREG(SEIDR), 0ULL);
	/* Fault/interrupt routing */
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL);
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL);
	write_hvwc_reg(window, VREG(PSWID), 0ULL);
	/* Local FIFO and DMA state */
	write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
	/* Window credits */
	write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL);
	/* Window control / status / caching */
	write_hvwc_reg(window, VREG(WINCTL), 0ULL);
	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL);
	write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL);
	/* Local notification context */
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);

	/* Skip read-only registers: NX_UTIL and NX_UTIL_SE */

	/*
	 * The send and receive window credit adder registers are also
	 * accessible from HVWC and have been initialized above. We don't
	 * need to initialize from the OS/User Window Context, so skip
	 * following calls:
	 *
	 *	write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
	 *	write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	 */
}
265*4882a593Smuzhiyun 
/*
 * Initialize window context registers related to Address Translation.
 * These registers are common to send/receive windows although they
 * differ for user/kernel windows. As we resolve the TODOs we may
 * want to add fields to vas_winctx and move the initialization to
 * init_vas_winctx_regs().
 *
 * @window:   window whose translation registers to program
 * @user_win: true for a user-space window (adds MSR_DR/MSR_PR bits)
 */
static void init_xlate_regs(struct vas_window *window, bool user_win)
{
	u64 lpcr, val;

	/*
	 * MSR_TA, MSR_US are false for both kernel and user.
	 * MSR_DR and MSR_PR are false for kernel.
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1);
	val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1);
	if (user_win) {
		val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1);
		val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1);
	}
	write_hvwc_reg(window, VREG(XLATE_MSR), val);

	/* Mirror the relevant bits of the host LPCR into the window. */
	lpcr = mfspr(SPRN_LPCR);
	val = 0ULL;
	/*
	 * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the
	 *	 Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB.
	 *
	 * NOTE: From Section 1.3.1, Address Translation Context of the
	 *	 Nest MMU Workbook, LPCR_SC should be 0 for Power9.
	 */
	val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5);
	val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL);
	val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC);
	val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0);
	write_hvwc_reg(window, VREG(XLATE_LPCR), val);

	/*
	 * Section 1.3.1 (Address translation Context) of NMMU workbook.
	 *	0b00	Hashed Page Table mode
	 *	0b01	Reserved
	 *	0b10	Radix on HPT
	 *	0b11	Radix on Radix
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2);
	write_hvwc_reg(window, VREG(XLATE_CTL), val);

	/*
	 * TODO: Can we mfspr(AMR) even for user windows?
	 */
	val = 0ULL;
	val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR));
	write_hvwc_reg(window, VREG(AMR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_SEIDR, val, 0);
	write_hvwc_reg(window, VREG(SEIDR), val);
}
327*4882a593Smuzhiyun 
/*
 * Initialize Reserved Send Buffer Count for the send window. It involves
 * writing to the register, reading it back to confirm that the hardware
 * has enough buffers to reserve. See section 1.3.1.2.1 of VAS workbook.
 *
 * Since we can only make a best-effort attempt to fulfill the request,
 * we don't return any errors if we cannot.
 *
 * TODO: Reserved (aka dedicated) send buffers are not supported yet.
 */
static void init_rsvd_tx_buf_count(struct vas_window *txwin,
				struct vas_winctx *winctx)
{
	/* No dedicated buffers supported yet - always reserve zero. */
	write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
}
343*4882a593Smuzhiyun 
/*
 * init_winctx_regs()
 *	Initialize window context registers for a receive window.
 *	Except for caching control and marking window open, the registers
 *	are initialized in the order listed in Section 3.1.4 (Window Context
 *	Cache Register Details) of the VAS workbook although they don't need
 *	to be.
 *
 * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
 *	(so that it can get a large contiguous area) and passes that buffer
 *	to kernel via device tree. We now write that buffer address to the
 *	FIFO BAR. Would it make sense to do this all in OPAL? i.e have OPAL
 *	write the per-chip RX FIFO addresses to the windows during boot-up
 *	as a one-time task? That could work for NX but what about other
 *	receivers?  Let the receivers tell us the rx-fifo buffers for now.
 */
static void init_winctx_regs(struct vas_window *window,
			     struct vas_winctx *winctx)
{
	u64 val;
	int fifo_size;

	/* Start from a clean slate before programming the context. */
	reset_window_regs(window);

	val = 0ULL;
	val = SET_FIELD(VAS_LPID, val, winctx->lpid);
	write_hvwc_reg(window, VREG(LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
	write_hvwc_reg(window, VREG(PID), val);

	init_xlate_regs(window, winctx->user_win);

	val = 0ULL;
	val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id);
	write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);

	/* In PowerNV, interrupts go to HV. */
	write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
	write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);

	val = 0ULL;
	val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
	write_hvwc_reg(window, VREG(PSWID), val);

	write_hvwc_reg(window, VREG(SPARE1), 0ULL);
	write_hvwc_reg(window, VREG(SPARE2), 0ULL);
	write_hvwc_reg(window, VREG(SPARE3), 0ULL);

	/*
	 * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
	 *	 register as is - do NOT shift the address into VAS_LFIFO_BAR
	 *	 bit fields! Ok to set the page migration select fields -
	 *	 VAS ignores the lower 10+ bits in the address anyway, because
	 *	 the minimum FIFO size is 1K?
	 *
	 * See also: Design note in function header.
	 */
	val = winctx->rx_fifo;
	val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
	write_hvwc_reg(window, VREG(LFIFO_BAR), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
	write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
	val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
	write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);

	write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
	write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
	write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);

	/* Receive- and send-side window credits share the same maximum. */
	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(LRX_WCRED), val);

	val = 0ULL;
	val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
	write_hvwc_reg(window, VREG(TX_WCRED), val);

	write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);

	/* LFIFO_SIZE is encoded as log2 of the size in KB. */
	fifo_size = winctx->rx_fifo_size / 1024;

	val = 0ULL;
	val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
	write_hvwc_reg(window, VREG(LFIFO_SIZE), val);

	/* Update window control and caching control registers last so
	 * we mark the window open only after fully initializing it and
	 * pushing context to cache.
	 */

	write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);

	init_rsvd_tx_buf_count(window, winctx);

	/* for a send window, point to the matching receive window */
	val = 0ULL;
	val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
	write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);

	write_hvwc_reg(window, VREG(SPARE4), 0ULL);

	val = 0ULL;
	val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
	val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
	val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
	val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
	write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
	write_hvwc_reg(window, VREG(LNOTIFY_PID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
	write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
	write_hvwc_reg(window, VREG(LNOTIFY_TID), val);

	val = 0ULL;
	val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
	val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
	write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);

	/* Skip read-only registers NX_UTIL and NX_UTIL_SE */

	write_hvwc_reg(window, VREG(SPARE5), 0ULL);
	write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
	write_hvwc_reg(window, VREG(SPARE6), 0ULL);

	/* Finally, push window context to memory and... */
	val = 0ULL;
	val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
	write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);

	/* ... mark the window open for business */
	val = 0ULL;
	val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
	val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
	val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
	val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
	val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
	val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
	val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
	val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
	write_hvwc_reg(window, VREG(WINCTL), val);
}
504*4882a593Smuzhiyun 
/* Return window id @winid to the instance's id allocator. */
static void vas_release_window_id(struct ida *ida, int winid)
{
	ida_free(ida, winid);
}
509*4882a593Smuzhiyun 
vas_assign_window_id(struct ida * ida)510*4882a593Smuzhiyun static int vas_assign_window_id(struct ida *ida)
511*4882a593Smuzhiyun {
512*4882a593Smuzhiyun 	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	if (winid == -ENOSPC) {
515*4882a593Smuzhiyun 		pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
516*4882a593Smuzhiyun 		return -EAGAIN;
517*4882a593Smuzhiyun 	}
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	return winid;
520*4882a593Smuzhiyun }
521*4882a593Smuzhiyun 
vas_window_free(struct vas_window * window)522*4882a593Smuzhiyun static void vas_window_free(struct vas_window *window)
523*4882a593Smuzhiyun {
524*4882a593Smuzhiyun 	int winid = window->winid;
525*4882a593Smuzhiyun 	struct vas_instance *vinst = window->vinst;
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	unmap_winctx_mmio_bars(window);
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	vas_window_free_dbgdir(window);
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 	kfree(window);
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	vas_release_window_id(&vinst->ida, winid);
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun 
vas_window_alloc(struct vas_instance * vinst)536*4882a593Smuzhiyun static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
537*4882a593Smuzhiyun {
538*4882a593Smuzhiyun 	int winid;
539*4882a593Smuzhiyun 	struct vas_window *window;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	winid = vas_assign_window_id(&vinst->ida);
542*4882a593Smuzhiyun 	if (winid < 0)
543*4882a593Smuzhiyun 		return ERR_PTR(winid);
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	window = kzalloc(sizeof(*window), GFP_KERNEL);
546*4882a593Smuzhiyun 	if (!window)
547*4882a593Smuzhiyun 		goto out_free;
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	window->vinst = vinst;
550*4882a593Smuzhiyun 	window->winid = winid;
551*4882a593Smuzhiyun 
552*4882a593Smuzhiyun 	if (map_winctx_mmio_bars(window))
553*4882a593Smuzhiyun 		goto out_free;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	vas_window_init_dbgdir(window);
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	return window;
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun out_free:
560*4882a593Smuzhiyun 	kfree(window);
561*4882a593Smuzhiyun 	vas_release_window_id(&vinst->ida, winid);
562*4882a593Smuzhiyun 	return ERR_PTR(-ENOMEM);
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun 
/*
 * Drop a send window's reference on its paired receive window
 * (taken in get_vinst_rxwin()).
 */
static void put_rx_win(struct vas_window *rxwin)
{
	/* Better not be a send window! */
	WARN_ON_ONCE(rxwin->tx_win);

	atomic_dec(&rxwin->num_txwins);
}
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun /*
574*4882a593Smuzhiyun  * Find the user space receive window given the @pswid.
575*4882a593Smuzhiyun  *      - We must have a valid vasid and it must belong to this instance.
576*4882a593Smuzhiyun  *        (so both send and receive windows are on the same VAS instance)
577*4882a593Smuzhiyun  *      - The window must refer to an OPEN, FTW, RECEIVE window.
578*4882a593Smuzhiyun  *
579*4882a593Smuzhiyun  * NOTE: We access ->windows[] table and assume that vinst->mutex is held.
580*4882a593Smuzhiyun  */
get_user_rxwin(struct vas_instance * vinst,u32 pswid)581*4882a593Smuzhiyun static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
582*4882a593Smuzhiyun {
583*4882a593Smuzhiyun 	int vasid, winid;
584*4882a593Smuzhiyun 	struct vas_window *rxwin;
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 	decode_pswid(pswid, &vasid, &winid);
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	if (vinst->vas_id != vasid)
589*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	rxwin = vinst->windows[winid];
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
594*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	return rxwin;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun /*
600*4882a593Smuzhiyun  * Get the VAS receive window associated with NX engine identified
601*4882a593Smuzhiyun  * by @cop and if applicable, @pswid.
602*4882a593Smuzhiyun  *
603*4882a593Smuzhiyun  * See also function header of set_vinst_win().
604*4882a593Smuzhiyun  */
static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
			enum vas_cop_type cop, u32 pswid)
{
	struct vas_window *win;

	mutex_lock(&vinst->mutex);

	/*
	 * FTW send windows pair with a user receive window identified
	 * by @pswid; all other coprocessor types use the per-cop table.
	 */
	if (cop == VAS_COP_TYPE_FTW)
		win = get_user_rxwin(vinst, pswid);
	else
		win = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL);

	/* Count the send window that is about to attach to this rxwin. */
	if (!IS_ERR(win))
		atomic_inc(&win->num_txwins);

	mutex_unlock(&vinst->mutex);

	return win;
}
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun /*
626*4882a593Smuzhiyun  * We have two tables of windows in a VAS instance. The first one,
627*4882a593Smuzhiyun  * ->windows[], contains all the windows in the instance and allows
628*4882a593Smuzhiyun  * looking up a window by its id. It is used to look up send windows
629*4882a593Smuzhiyun  * during fault handling and receive windows when pairing user space
630*4882a593Smuzhiyun  * send/receive windows.
631*4882a593Smuzhiyun  *
632*4882a593Smuzhiyun  * The second table, ->rxwin[], contains receive windows that are
633*4882a593Smuzhiyun  * associated with NX engines. This table has VAS_COP_TYPE_MAX
634*4882a593Smuzhiyun  * entries and is used to look up a receive window by its
635*4882a593Smuzhiyun  * coprocessor type.
636*4882a593Smuzhiyun  *
637*4882a593Smuzhiyun  * Here, we save @window in the ->windows[] table. If it is a receive
638*4882a593Smuzhiyun  * window, we also save the window in the ->rxwin[] table.
639*4882a593Smuzhiyun  */
set_vinst_win(struct vas_instance * vinst,struct vas_window * window)640*4882a593Smuzhiyun static void set_vinst_win(struct vas_instance *vinst,
641*4882a593Smuzhiyun 			struct vas_window *window)
642*4882a593Smuzhiyun {
643*4882a593Smuzhiyun 	int id = window->winid;
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	mutex_lock(&vinst->mutex);
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	/*
648*4882a593Smuzhiyun 	 * There should only be one receive window for a coprocessor type
649*4882a593Smuzhiyun 	 * unless its a user (FTW) window.
650*4882a593Smuzhiyun 	 */
651*4882a593Smuzhiyun 	if (!window->user_win && !window->tx_win) {
652*4882a593Smuzhiyun 		WARN_ON_ONCE(vinst->rxwin[window->cop]);
653*4882a593Smuzhiyun 		vinst->rxwin[window->cop] = window;
654*4882a593Smuzhiyun 	}
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	WARN_ON_ONCE(vinst->windows[id] != NULL);
657*4882a593Smuzhiyun 	vinst->windows[id] = window;
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	mutex_unlock(&vinst->mutex);
660*4882a593Smuzhiyun }
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun /*
663*4882a593Smuzhiyun  * Clear this window from the table(s) of windows for this VAS instance.
664*4882a593Smuzhiyun  * See also function header of set_vinst_win().
665*4882a593Smuzhiyun  */
clear_vinst_win(struct vas_window * window)666*4882a593Smuzhiyun static void clear_vinst_win(struct vas_window *window)
667*4882a593Smuzhiyun {
668*4882a593Smuzhiyun 	int id = window->winid;
669*4882a593Smuzhiyun 	struct vas_instance *vinst = window->vinst;
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	mutex_lock(&vinst->mutex);
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	if (!window->user_win && !window->tx_win) {
674*4882a593Smuzhiyun 		WARN_ON_ONCE(!vinst->rxwin[window->cop]);
675*4882a593Smuzhiyun 		vinst->rxwin[window->cop] = NULL;
676*4882a593Smuzhiyun 	}
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	WARN_ON_ONCE(vinst->windows[id] != window);
679*4882a593Smuzhiyun 	vinst->windows[id] = NULL;
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	mutex_unlock(&vinst->mutex);
682*4882a593Smuzhiyun }
683*4882a593Smuzhiyun 
/*
 * Build the hardware window context @winctx for the receive window
 * @rxwin from the caller-supplied attributes @rxattr. The context is
 * later written to the HV window context registers (init_winctx_regs()).
 */
static void init_winctx_for_rxwin(struct vas_window *rxwin,
			struct vas_rx_win_attr *rxattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero (memset()) all fields and only set non-zero fields.
	 * Following fields are 0/false but maybe deserve a comment:
	 *
	 *	->notify_os_intr_reg	In powerNV, send intrs to HV
	 *	->notify_disable	False for NX windows
	 *	->intr_disable		False for Fault Windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->rsvd_txbuf_count	NA for Rx windows
	 *	->lpid, ->pid, ->tid	NA for Rx windows
	 */

	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->rx_fifo = rxattr->rx_fifo;
	winctx->rx_fifo_size = rxattr->rx_fifo_size;
	winctx->wcreds_max = rxwin->wcreds_max;
	winctx->pin_win = rxattr->pin_win;

	winctx->nx_win = rxattr->nx_win;
	winctx->fault_win = rxattr->fault_win;
	winctx->user_win = rxattr->user_win;
	winctx->rej_no_credit = rxattr->rej_no_credit;
	winctx->rx_word_mode = rxattr->rx_win_ord_mode;
	winctx->tx_word_mode = rxattr->tx_win_ord_mode;
	winctx->rx_wcred_mode = rxattr->rx_wcred_mode;
	winctx->tx_wcred_mode = rxattr->tx_wcred_mode;
	winctx->notify_early = rxattr->notify_early;

	if (winctx->nx_win) {
		/* NX windows are always pinned, stamped and non-interrupting */
		winctx->data_stamp = true;
		winctx->intr_disable = true;
		winctx->pin_win = true;

		/* Sanity-check invariants rx_win_args_valid() should have enforced */
		WARN_ON_ONCE(winctx->fault_win);
		WARN_ON_ONCE(!winctx->rx_word_mode);
		WARN_ON_ONCE(!winctx->tx_word_mode);
		WARN_ON_ONCE(winctx->notify_after_count);
	} else if (winctx->fault_win) {
		winctx->notify_disable = true;
	} else if (winctx->user_win) {
		/*
		 * Section 1.8.1 Low Latency Core-Core Wake up of
		 * the VAS workbook:
		 *
		 *      - disable credit checks ([tr]x_wcred_mode = false)
		 *      - disable FIFO writes
		 *      - enable ASB_Notify, disable interrupt
		 */
		winctx->fifo_disable = true;
		winctx->intr_disable = true;
		winctx->rx_fifo = 0;
	}

	winctx->lnotify_lpid = rxattr->lnotify_lpid;
	winctx->lnotify_pid = rxattr->lnotify_pid;
	winctx->lnotify_tid = rxattr->lnotify_tid;
	winctx->pswid = rxattr->pswid;
	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = rxattr->tc_mode;

	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
	/* Route window interrupts to the fault IRQ port, if one was set up */
	if (rxwin->vinst->virq)
		winctx->irq_port = rxwin->vinst->irq_port;
}
755*4882a593Smuzhiyun 
rx_win_args_valid(enum vas_cop_type cop,struct vas_rx_win_attr * attr)756*4882a593Smuzhiyun static bool rx_win_args_valid(enum vas_cop_type cop,
757*4882a593Smuzhiyun 			struct vas_rx_win_attr *attr)
758*4882a593Smuzhiyun {
759*4882a593Smuzhiyun 	pr_debug("Rxattr: fault %d, notify %d, intr %d, early %d, fifo %d\n",
760*4882a593Smuzhiyun 			attr->fault_win, attr->notify_disable,
761*4882a593Smuzhiyun 			attr->intr_disable, attr->notify_early,
762*4882a593Smuzhiyun 			attr->rx_fifo_size);
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 	if (cop >= VAS_COP_TYPE_MAX)
765*4882a593Smuzhiyun 		return false;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	if (cop != VAS_COP_TYPE_FTW &&
768*4882a593Smuzhiyun 				attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN)
769*4882a593Smuzhiyun 		return false;
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
772*4882a593Smuzhiyun 		return false;
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	if (!attr->wcreds_max)
775*4882a593Smuzhiyun 		return false;
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	if (attr->nx_win) {
778*4882a593Smuzhiyun 		/* cannot be fault or user window if it is nx */
779*4882a593Smuzhiyun 		if (attr->fault_win || attr->user_win)
780*4882a593Smuzhiyun 			return false;
781*4882a593Smuzhiyun 		/*
782*4882a593Smuzhiyun 		 * Section 3.1.4.32: NX Windows must not disable notification,
783*4882a593Smuzhiyun 		 *	and must not enable interrupts or early notification.
784*4882a593Smuzhiyun 		 */
785*4882a593Smuzhiyun 		if (attr->notify_disable || !attr->intr_disable ||
786*4882a593Smuzhiyun 				attr->notify_early)
787*4882a593Smuzhiyun 			return false;
788*4882a593Smuzhiyun 	} else if (attr->fault_win) {
789*4882a593Smuzhiyun 		/* cannot be both fault and user window */
790*4882a593Smuzhiyun 		if (attr->user_win)
791*4882a593Smuzhiyun 			return false;
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 		/*
794*4882a593Smuzhiyun 		 * Section 3.1.4.32: Fault windows must disable notification
795*4882a593Smuzhiyun 		 *	but not interrupts.
796*4882a593Smuzhiyun 		 */
797*4882a593Smuzhiyun 		if (!attr->notify_disable || attr->intr_disable)
798*4882a593Smuzhiyun 			return false;
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	} else if (attr->user_win) {
801*4882a593Smuzhiyun 		/*
802*4882a593Smuzhiyun 		 * User receive windows are only for fast-thread-wakeup
803*4882a593Smuzhiyun 		 * (FTW). They don't need a FIFO and must disable interrupts
804*4882a593Smuzhiyun 		 */
805*4882a593Smuzhiyun 		if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable)
806*4882a593Smuzhiyun 			return false;
807*4882a593Smuzhiyun 	} else {
808*4882a593Smuzhiyun 		/* Rx window must be one of NX or Fault or User window. */
809*4882a593Smuzhiyun 		return false;
810*4882a593Smuzhiyun 	}
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	return true;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun 
vas_init_rx_win_attr(struct vas_rx_win_attr * rxattr,enum vas_cop_type cop)815*4882a593Smuzhiyun void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
816*4882a593Smuzhiyun {
817*4882a593Smuzhiyun 	memset(rxattr, 0, sizeof(*rxattr));
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
820*4882a593Smuzhiyun 		cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
821*4882a593Smuzhiyun 		rxattr->pin_win = true;
822*4882a593Smuzhiyun 		rxattr->nx_win = true;
823*4882a593Smuzhiyun 		rxattr->fault_win = false;
824*4882a593Smuzhiyun 		rxattr->intr_disable = true;
825*4882a593Smuzhiyun 		rxattr->rx_wcred_mode = true;
826*4882a593Smuzhiyun 		rxattr->tx_wcred_mode = true;
827*4882a593Smuzhiyun 		rxattr->rx_win_ord_mode = true;
828*4882a593Smuzhiyun 		rxattr->tx_win_ord_mode = true;
829*4882a593Smuzhiyun 	} else if (cop == VAS_COP_TYPE_FAULT) {
830*4882a593Smuzhiyun 		rxattr->pin_win = true;
831*4882a593Smuzhiyun 		rxattr->fault_win = true;
832*4882a593Smuzhiyun 		rxattr->notify_disable = true;
833*4882a593Smuzhiyun 		rxattr->rx_wcred_mode = true;
834*4882a593Smuzhiyun 		rxattr->rx_win_ord_mode = true;
835*4882a593Smuzhiyun 		rxattr->rej_no_credit = true;
836*4882a593Smuzhiyun 		rxattr->tc_mode = VAS_THRESH_DISABLED;
837*4882a593Smuzhiyun 	} else if (cop == VAS_COP_TYPE_FTW) {
838*4882a593Smuzhiyun 		rxattr->user_win = true;
839*4882a593Smuzhiyun 		rxattr->intr_disable = true;
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun 		/*
842*4882a593Smuzhiyun 		 * As noted in the VAS Workbook we disable credit checks.
843*4882a593Smuzhiyun 		 * If we enable credit checks in the future, we must also
844*4882a593Smuzhiyun 		 * implement a mechanism to return the user credits or new
845*4882a593Smuzhiyun 		 * paste operations will fail.
846*4882a593Smuzhiyun 		 */
847*4882a593Smuzhiyun 	}
848*4882a593Smuzhiyun }
849*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
850*4882a593Smuzhiyun 
vas_rx_win_open(int vasid,enum vas_cop_type cop,struct vas_rx_win_attr * rxattr)851*4882a593Smuzhiyun struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
852*4882a593Smuzhiyun 			struct vas_rx_win_attr *rxattr)
853*4882a593Smuzhiyun {
854*4882a593Smuzhiyun 	struct vas_window *rxwin;
855*4882a593Smuzhiyun 	struct vas_winctx winctx;
856*4882a593Smuzhiyun 	struct vas_instance *vinst;
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	trace_vas_rx_win_open(current, vasid, cop, rxattr);
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	if (!rx_win_args_valid(cop, rxattr))
861*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	vinst = find_vas_instance(vasid);
864*4882a593Smuzhiyun 	if (!vinst) {
865*4882a593Smuzhiyun 		pr_devel("vasid %d not found!\n", vasid);
866*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
867*4882a593Smuzhiyun 	}
868*4882a593Smuzhiyun 	pr_devel("Found instance %d\n", vasid);
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	rxwin = vas_window_alloc(vinst);
871*4882a593Smuzhiyun 	if (IS_ERR(rxwin)) {
872*4882a593Smuzhiyun 		pr_devel("Unable to allocate memory for Rx window\n");
873*4882a593Smuzhiyun 		return rxwin;
874*4882a593Smuzhiyun 	}
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	rxwin->tx_win = false;
877*4882a593Smuzhiyun 	rxwin->nx_win = rxattr->nx_win;
878*4882a593Smuzhiyun 	rxwin->user_win = rxattr->user_win;
879*4882a593Smuzhiyun 	rxwin->cop = cop;
880*4882a593Smuzhiyun 	rxwin->wcreds_max = rxattr->wcreds_max;
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	init_winctx_for_rxwin(rxwin, rxattr, &winctx);
883*4882a593Smuzhiyun 	init_winctx_regs(rxwin, &winctx);
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	set_vinst_win(vinst, rxwin);
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 	return rxwin;
888*4882a593Smuzhiyun }
889*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vas_rx_win_open);
890*4882a593Smuzhiyun 
vas_init_tx_win_attr(struct vas_tx_win_attr * txattr,enum vas_cop_type cop)891*4882a593Smuzhiyun void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
892*4882a593Smuzhiyun {
893*4882a593Smuzhiyun 	memset(txattr, 0, sizeof(*txattr));
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
896*4882a593Smuzhiyun 		cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
897*4882a593Smuzhiyun 		txattr->rej_no_credit = false;
898*4882a593Smuzhiyun 		txattr->rx_wcred_mode = true;
899*4882a593Smuzhiyun 		txattr->tx_wcred_mode = true;
900*4882a593Smuzhiyun 		txattr->rx_win_ord_mode = true;
901*4882a593Smuzhiyun 		txattr->tx_win_ord_mode = true;
902*4882a593Smuzhiyun 	} else if (cop == VAS_COP_TYPE_FTW) {
903*4882a593Smuzhiyun 		txattr->user_win = true;
904*4882a593Smuzhiyun 	}
905*4882a593Smuzhiyun }
906*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
907*4882a593Smuzhiyun 
/*
 * Build the hardware window context @winctx for the send window
 * @txwin from the caller-supplied attributes @txattr. The context is
 * later written to the HV window context registers (init_winctx_regs()).
 */
static void init_winctx_for_txwin(struct vas_window *txwin,
			struct vas_tx_win_attr *txattr,
			struct vas_winctx *winctx)
{
	/*
	 * We first zero all fields and only set non-zero ones. Following
	 * are some fields set to 0/false for the stated reason:
	 *
	 *	->notify_os_intr_reg	In powernv, send intrs to HV
	 *	->rsvd_txbuf_count	Not supported yet.
	 *	->notify_disable	False for NX windows
	 *	->xtra_write		False for NX windows
	 *	->notify_early		NA for NX windows
	 *	->lnotify_lpid		NA for Tx windows
	 *	->lnotify_pid		NA for Tx windows
	 *	->lnotify_tid		NA for Tx windows
	 *	->tx_win_cred_mode	Ignore for now for NX windows
	 *	->rx_win_cred_mode	Ignore for now for NX windows
	 */
	memset(winctx, 0, sizeof(struct vas_winctx));

	winctx->wcreds_max = txwin->wcreds_max;

	winctx->user_win = txattr->user_win;
	/* A send window is an NX window iff its paired rxwin is one */
	winctx->nx_win = txwin->rxwin->nx_win;
	winctx->pin_win = txattr->pin_win;
	winctx->rej_no_credit = txattr->rej_no_credit;
	winctx->rsvd_txbuf_enable = txattr->rsvd_txbuf_enable;

	winctx->rx_wcred_mode = txattr->rx_wcred_mode;
	winctx->tx_wcred_mode = txattr->tx_wcred_mode;
	winctx->rx_word_mode = txattr->rx_win_ord_mode;
	winctx->tx_word_mode = txattr->tx_win_ord_mode;
	winctx->rsvd_txbuf_count = txattr->rsvd_txbuf_count;

	winctx->intr_disable = true;
	if (winctx->nx_win)
		winctx->data_stamp = true;

	winctx->lpid = txattr->lpid;
	winctx->pidr = txattr->pidr;
	winctx->rx_win_id = txwin->rxwin->winid;
	/*
	 * IRQ and fault window setup is successful. Set fault window
	 * for the send window so that ready to handle faults.
	 */
	if (txwin->vinst->virq)
		winctx->fault_win_id = txwin->vinst->fault_win->winid;

	winctx->dma_type = VAS_DMA_TYPE_INJECT;
	winctx->tc_mode = txattr->tc_mode;
	winctx->min_scope = VAS_SCOPE_LOCAL;
	winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
	/* NOTE(review): vinst->virq was already tested above; the two
	 * conditional blocks could be merged. */
	if (txwin->vinst->virq)
		winctx->irq_port = txwin->vinst->irq_port;

	/* Default the paste window id to this window's own id. */
	winctx->pswid = txattr->pswid ? txattr->pswid :
			encode_pswid(txwin->vinst->vas_id, txwin->winid);
}
967*4882a593Smuzhiyun 
tx_win_args_valid(enum vas_cop_type cop,struct vas_tx_win_attr * attr)968*4882a593Smuzhiyun static bool tx_win_args_valid(enum vas_cop_type cop,
969*4882a593Smuzhiyun 			struct vas_tx_win_attr *attr)
970*4882a593Smuzhiyun {
971*4882a593Smuzhiyun 	if (attr->tc_mode != VAS_THRESH_DISABLED)
972*4882a593Smuzhiyun 		return false;
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	if (cop > VAS_COP_TYPE_MAX)
975*4882a593Smuzhiyun 		return false;
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 	if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
978*4882a593Smuzhiyun 		return false;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	if (attr->user_win) {
981*4882a593Smuzhiyun 		if (attr->rsvd_txbuf_count)
982*4882a593Smuzhiyun 			return false;
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 		if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP &&
985*4882a593Smuzhiyun 			cop != VAS_COP_TYPE_GZIP_HIPRI)
986*4882a593Smuzhiyun 			return false;
987*4882a593Smuzhiyun 	}
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	return true;
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun 
/*
 * Open a send window on VAS instance @vasid for coprocessor type @cop,
 * pairing it with the matching receive window (which gets a num_txwins
 * reference). Returns the new window or an ERR_PTR() on failure.
 *
 * Kernel windows get their paste address mapped here; user space
 * windows must mmap() the paste address themselves and additionally
 * require the fault IRQ/window (vinst->virq) to have been set up.
 */
struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
			struct vas_tx_win_attr *attr)
{
	int rc;
	struct vas_window *txwin;
	struct vas_window *rxwin;
	struct vas_winctx winctx;
	struct vas_instance *vinst;

	trace_vas_tx_win_open(current, vasid, cop, attr);

	if (!tx_win_args_valid(cop, attr))
		return ERR_PTR(-EINVAL);

	/*
	 * If caller did not specify a vasid but specified the PSWID of a
	 * receive window (applicable only to FTW windows), use the vasid
	 * from that receive window.
	 */
	if (vasid == -1 && attr->pswid)
		decode_pswid(attr->pswid, &vasid, NULL);

	vinst = find_vas_instance(vasid);
	if (!vinst) {
		pr_devel("vasid %d not found!\n", vasid);
		return ERR_PTR(-EINVAL);
	}

	/* Takes a num_txwins reference on the paired receive window. */
	rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
	if (IS_ERR(rxwin)) {
		pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
		return rxwin;
	}

	txwin = vas_window_alloc(vinst);
	if (IS_ERR(txwin)) {
		rc = PTR_ERR(txwin);
		goto put_rxwin;
	}

	txwin->cop = cop;
	txwin->tx_win = 1;
	txwin->rxwin = rxwin;
	txwin->nx_win = txwin->rxwin->nx_win;
	txwin->user_win = attr->user_win;
	txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;

	init_winctx_for_txwin(txwin, attr, &winctx);

	init_winctx_regs(txwin, &winctx);

	/*
	 * If its a kernel send window, map the window address into the
	 * kernel's address space. For user windows, user must issue an
	 * mmap() to map the window into their address space.
	 *
	 * NOTE: If kernel ever resubmits a user CRB after handling a page
	 *	 fault, we will need to map this into kernel as well.
	 */
	if (!txwin->user_win) {
		txwin->paste_kaddr = map_paste_region(txwin);
		if (IS_ERR(txwin->paste_kaddr)) {
			rc = PTR_ERR(txwin->paste_kaddr);
			goto free_window;
		}
	} else {
		/*
		 * Interrupt handler or fault window setup failed. That
		 * means NX can not generate a fault for a page fault, so
		 * do not open a user space send window.
		 */
		if (!vinst->virq) {
			rc = -ENODEV;
			goto free_window;
		}

		/*
		 * Window opened by a child thread may not be closed when
		 * it exits. So take reference to its pid and release it
		 * when the window is free by parent thread.
		 * Acquire a reference to the task's pid to make sure
		 * pid will not be re-used - needed only for multithread
		 * applications.
		 */
		txwin->pid = get_task_pid(current, PIDTYPE_PID);
		/*
		 * Acquire a reference to the task's mm.
		 */
		txwin->mm = get_task_mm(current);

		if (!txwin->mm) {
			put_pid(txwin->pid);
			pr_err("VAS: pid(%d): mm_struct is not found\n",
					current->pid);
			rc = -EPERM;
			goto free_window;
		}

		/*
		 * get_task_mm() took an mm_users reference; mmgrab() then
		 * mmput() converts it into an mm_count reference so the
		 * open window pins the mm_struct itself rather than the
		 * full address space.
		 */
		mmgrab(txwin->mm);
		mmput(txwin->mm);
		mm_context_add_vas_window(txwin->mm);
		/*
		 * Process closes window during exit. In the case of
		 * multithread application, the child thread can open
		 * window and can exit without closing it. so takes tgid
		 * reference until window closed to make sure tgid is not
		 * reused.
		 */
		txwin->tgid = find_get_pid(task_tgid_vnr(current));
		/*
		 * Even a process that has no foreign real address mapping can
		 * use an unpaired COPY instruction (to no real effect). Issue
		 * CP_ABORT to clear any pending COPY and prevent a covert
		 * channel.
		 *
		 * __switch_to() will issue CP_ABORT on future context switches
		 * if process / thread has any open VAS window (Use
		 * current->mm->context.vas_windows).
		 */
		asm volatile(PPC_CP_ABORT);
	}

	/* Publish the window in the instance tables. */
	set_vinst_win(vinst, txwin);

	return txwin;

free_window:
	vas_window_free(txwin);

put_rxwin:
	put_rx_win(rxwin);
	return ERR_PTR(rc);

}
EXPORT_SYMBOL_GPL(vas_tx_win_open);
1127*4882a593Smuzhiyun 
/*
 * COPY the coprocessor request block at @crb (adjusted by @offset) in
 * preparation for pasting it to a send window. Thin wrapper around the
 * vas_copy() helper (copy-paste.h); returns its result unchanged.
 */
int vas_copy_crb(void *crb, int offset)
{
	return vas_copy(crb, offset);
}
EXPORT_SYMBOL_GPL(vas_copy_crb);
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun #define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
/*
 * Paste the previously COPYed CRB to the send window @txwin at @offset.
 * @re selects the report-enable form of the paste. Returns 0 on success
 * or -EINVAL if the paste was not accepted.
 */
int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
{
	void *paste_addr;
	uint64_t re_bit;
	int ret;

	trace_vas_paste_crb(current, txwin);

	/*
	 * Only NX windows are supported for now and hardware assumes
	 * report-enable flag is set for NX windows. Ensure software
	 * complies too.
	 */
	WARN_ON_ONCE(txwin->nx_win && !re);

	paste_addr = txwin->paste_kaddr;
	if (re) {
		/*
		 * Set the REPORT_ENABLE bit (equivalent to writing
		 * to 1K offset of the paste address)
		 */
		re_bit = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1);
		paste_addr += re_bit;
	}

	/*
	 * Map the raw CR value from vas_paste() to an error code (there
	 * is just pass or fail for now though).
	 */
	ret = (vas_paste(paste_addr, offset) == 2) ? 0 : -EINVAL;

	pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
			read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));

	return ret;
}
EXPORT_SYMBOL_GPL(vas_paste_crb);
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun /*
1178*4882a593Smuzhiyun  * If credit checking is enabled for this window, poll for the return
1179*4882a593Smuzhiyun  * of window credits (i.e for NX engines to process any outstanding CRBs).
1180*4882a593Smuzhiyun  * Since NX-842 waits for the CRBs to be processed before closing the
1181*4882a593Smuzhiyun  * window, we should not have to wait for too long.
1182*4882a593Smuzhiyun  *
1183*4882a593Smuzhiyun  * TODO: We retry in 10ms intervals now. We could/should probably peek at
1184*4882a593Smuzhiyun  *	the VAS_LRFIFO_PUSH_OFFSET register to get an estimate of pending
1185*4882a593Smuzhiyun  *	CRBs on the FIFO and compute the delay dynamically on each retry.
1186*4882a593Smuzhiyun  *	But that is not really needed until we support NX-GZIP access from
1187*4882a593Smuzhiyun  *	user space. (NX-842 driver waits for CSB and Fast thread-wakeup
1188*4882a593Smuzhiyun  *	doesn't use credit checking).
1189*4882a593Smuzhiyun  */
static void poll_window_credits(struct vas_window *window)
{
	u64 val;
	int creds, mode;
	int count = 0;	/* number of 10ms polls, drives the stuck warning */

	/* Check the window control register: is credit checking enabled? */
	val = read_hvwc_reg(window, VREG(WINCTL));
	if (window->tx_win)
		mode = GET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val);
	else
		mode = GET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val);

	/* Credit checking disabled - nothing to wait for. */
	if (!mode)
		return;
retry:
	/* Read the current credit count for this window's direction. */
	if (window->tx_win) {
		val = read_hvwc_reg(window, VREG(TX_WCRED));
		creds = GET_FIELD(VAS_TX_WCRED, val);
	} else {
		val = read_hvwc_reg(window, VREG(LRX_WCRED));
		creds = GET_FIELD(VAS_LRX_WCRED, val);
	}

	/*
	 * Takes around few milliseconds to complete all pending requests
	 * and return credits.
	 * TODO: Scan fault FIFO and invalidate CRBs points to this window
	 *       and issue CRB Kill to stop all pending requests. Need only
	 *       if there is a bug in NX or fault handling in kernel.
	 */
	if (creds < window->wcreds_max) {
		val = 0;	/* NOTE(review): dead store - val is re-read at 'retry' */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(10));
		count++;
		/*
		 * Process can not close send window until all credits are
		 * returned.
		 */
		if (!(count % 1000))
			pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
				vas_window_pid(window), window->winid,
				creds, count);

		goto retry;
	}
}
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun /*
1239*4882a593Smuzhiyun  * Wait for the window to go to "not-busy" state. It should only take a
1240*4882a593Smuzhiyun  * short time to queue a CRB, so window should not be busy for too long.
1241*4882a593Smuzhiyun  * Trying 5ms intervals.
1242*4882a593Smuzhiyun  */
poll_window_busy_state(struct vas_window * window)1243*4882a593Smuzhiyun static void poll_window_busy_state(struct vas_window *window)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	int busy;
1246*4882a593Smuzhiyun 	u64 val;
1247*4882a593Smuzhiyun 	int count = 0;
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun retry:
1250*4882a593Smuzhiyun 	val = read_hvwc_reg(window, VREG(WIN_STATUS));
1251*4882a593Smuzhiyun 	busy = GET_FIELD(VAS_WIN_BUSY, val);
1252*4882a593Smuzhiyun 	if (busy) {
1253*4882a593Smuzhiyun 		val = 0;
1254*4882a593Smuzhiyun 		set_current_state(TASK_UNINTERRUPTIBLE);
1255*4882a593Smuzhiyun 		schedule_timeout(msecs_to_jiffies(10));
1256*4882a593Smuzhiyun 		count++;
1257*4882a593Smuzhiyun 		/*
1258*4882a593Smuzhiyun 		 * Takes around few milliseconds to process all pending
1259*4882a593Smuzhiyun 		 * requests.
1260*4882a593Smuzhiyun 		 */
1261*4882a593Smuzhiyun 		if (!(count % 1000))
1262*4882a593Smuzhiyun 			pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
1263*4882a593Smuzhiyun 				vas_window_pid(window), window->winid, count);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 		goto retry;
1266*4882a593Smuzhiyun 	}
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun 
/*
 * Have the hardware cast a window out of cache and wait for it to
 * be completed.
 *
 * NOTE: It can take a relatively long time to cast the window context
 *	out of the cache. It is not strictly necessary to cast out if:
 *
 *	- we clear the "Pin Window" bit (so hardware is free to evict)
 *
 *	- we re-initialize the window context when it is reassigned.
 *
 *	We do the former in vas_win_close() and latter in vas_win_open().
 *	So, ignoring the cast-out for now. We can add it as needed. If
 *	casting out becomes necessary we should consider offloading the
 *	job to a worker thread, so the window close can proceed quickly.
 */
static void poll_window_castout(struct vas_window *window)
{
	/* Intentionally empty: cast-out is not needed today (see NOTE). */
}
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun /*
1291*4882a593Smuzhiyun  * Unpin and close a window so no new requests are accepted and the
1292*4882a593Smuzhiyun  * hardware can evict this window from cache if necessary.
1293*4882a593Smuzhiyun  */
unpin_close_window(struct vas_window * window)1294*4882a593Smuzhiyun static void unpin_close_window(struct vas_window *window)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun 	u64 val;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	val = read_hvwc_reg(window, VREG(WINCTL));
1299*4882a593Smuzhiyun 	val = SET_FIELD(VAS_WINCTL_PIN, val, 0);
1300*4882a593Smuzhiyun 	val = SET_FIELD(VAS_WINCTL_OPEN, val, 0);
1301*4882a593Smuzhiyun 	write_hvwc_reg(window, VREG(WINCTL), val);
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
/*
 * Close a window.
 *
 * See Section 1.12.1 of VAS workbook v1.05 for details on closing window:
 *	- Disable new paste operations (unmap paste address)
 *	- Poll for the "Window Busy" bit to be cleared
 *	- Clear the Open/Enable bit for the Window.
 *	- Poll for return of window Credits (implies FIFO empty for Rx win?)
 *	- Unpin and cast window context out of cache
 *
 * Besides the hardware, kernel has some bookkeeping of course.
 *
 * Returns 0 on success (including for a NULL @window), or -EBUSY if an
 * Rx window still has send windows attached to it. The steps below are
 * order-dependent; do not reorder them.
 */
int vas_win_close(struct vas_window *window)
{
	if (!window)
		return 0;

	/* An Rx window may only be closed after all its Tx windows are. */
	if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
		pr_devel("Attempting to close an active Rx window!\n");
		WARN_ON_ONCE(1);
		return -EBUSY;
	}

	/* Stop new paste operations from reaching the window. */
	unmap_paste_region(window);

	/* Wait for already-pasted requests to be queued. */
	poll_window_busy_state(window);

	/* Clear Pin/Open so the window accepts no further requests. */
	unpin_close_window(window);

	/* Wait for pending requests to complete and credits to return. */
	poll_window_credits(window);

	/* Drop the window from the instance's bookkeeping. */
	clear_vinst_win(window);

	/* Currently a no-op; see the comment on poll_window_castout(). */
	poll_window_castout(window);

	/* if send window, drop reference to matching receive window */
	if (window->tx_win) {
		if (window->user_win) {
			/* Drop references to pid. tgid and mm */
			put_pid(window->pid);
			put_pid(window->tgid);
			if (window->mm) {
				mm_context_remove_vas_window(window->mm);
				mmdrop(window->mm);
			}
		}
		put_rx_win(window->rxwin);
	}

	vas_window_free(window);

	return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun /*
1360*4882a593Smuzhiyun  * Return credit for the given window.
1361*4882a593Smuzhiyun  * Send windows and fault window uses credit mechanism as follows:
1362*4882a593Smuzhiyun  *
1363*4882a593Smuzhiyun  * Send windows:
1364*4882a593Smuzhiyun  * - The default number of credits available for each send window is
1365*4882a593Smuzhiyun  *   1024. It means 1024 requests can be issued asynchronously at the
1366*4882a593Smuzhiyun  *   same time. If the credit is not available, that request will be
1367*4882a593Smuzhiyun  *   returned with RMA_Busy.
1368*4882a593Smuzhiyun  * - One credit is taken when NX request is issued.
1369*4882a593Smuzhiyun  * - This credit is returned after NX processed that request.
1370*4882a593Smuzhiyun  * - If NX encounters translation error, kernel will return the
1371*4882a593Smuzhiyun  *   credit on the specific send window after processing the fault CRB.
1372*4882a593Smuzhiyun  *
1373*4882a593Smuzhiyun  * Fault window:
1374*4882a593Smuzhiyun  * - The total number credits available is FIFO_SIZE/CRB_SIZE.
1375*4882a593Smuzhiyun  *   Means 4MB/128 in the current implementation. If credit is not
1376*4882a593Smuzhiyun  *   available, RMA_Reject is returned.
1377*4882a593Smuzhiyun  * - A credit is taken when NX pastes CRB in fault FIFO.
1378*4882a593Smuzhiyun  * - The kernel with return credit on fault window after reading entry
1379*4882a593Smuzhiyun  *   from fault FIFO.
1380*4882a593Smuzhiyun  */
vas_return_credit(struct vas_window * window,bool tx)1381*4882a593Smuzhiyun void vas_return_credit(struct vas_window *window, bool tx)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun 	uint64_t val;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	val = 0ULL;
1386*4882a593Smuzhiyun 	if (tx) { /* send window */
1387*4882a593Smuzhiyun 		val = SET_FIELD(VAS_TX_WCRED, val, 1);
1388*4882a593Smuzhiyun 		write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val);
1389*4882a593Smuzhiyun 	} else {
1390*4882a593Smuzhiyun 		val = SET_FIELD(VAS_LRX_WCRED, val, 1);
1391*4882a593Smuzhiyun 		write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val);
1392*4882a593Smuzhiyun 	}
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun 
/*
 * Look up the window that the given paste-window id (pswid) refers to
 * on VAS instance @vinst.
 *
 * Returns the window on success, ERR_PTR(-ESRCH) for a zero or
 * out-of-range pswid, or NULL when no window is assigned to the
 * decoded window id.
 *
 * NOTE(review): the error convention here is mixed (ERR_PTR vs NULL);
 * callers must check for both — confirm all call sites do.
 */
struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
		uint32_t pswid)
{
	struct vas_window *window;
	int winid;

	if (!pswid) {
		pr_devel("%s: called for pswid 0!\n", __func__);
		return ERR_PTR(-ESRCH);
	}

	/* Only the window id portion of the pswid is needed here. */
	decode_pswid(pswid, NULL, &winid);

	if (winid >= VAS_WINDOWS_PER_CHIP)
		return ERR_PTR(-ESRCH);

	/*
	 * If application closes the window before the hardware
	 * returns the fault CRB, we should wait in vas_win_close()
	 * for the pending requests. so the window must be active
	 * and the process alive.
	 *
	 * If its a kernel process, we should not get any faults and
	 * should not get here.
	 */
	window = vinst->windows[winid];

	if (!window) {
		pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n",
			winid, pswid, vinst);
		return NULL;
	}

	/*
	 * Do some sanity checks on the decoded window.  Window should be
	 * NX GZIP user send window. FTW windows should not incur faults
	 * since their CRBs are ignored (not queued on FIFO or processed
	 * by NX).
	 */
	if (!window->tx_win || !window->user_win || !window->nx_win ||
			window->cop == VAS_COP_TYPE_FAULT ||
			window->cop == VAS_COP_TYPE_FTW) {
		pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
			winid, window->tx_win, window->user_win,
			window->nx_win, window->cop);
		WARN_ON(1);
	}

	return window;
}
1445