xref: /OK3568_Linux_fs/kernel/arch/s390/include/asm/idals.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 2000
 *
 * History of changes
 * 07/24/00 new file
 * 05/04/02 code restructuring.
 */

#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <linux/uaccess.h>

#define IDA_SIZE_LOG 12 /* 11 for 2k, 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)

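/*
 * Note on indirect addressing: the data address field of a CCW is limited
 * to 31 bits, so only data below 2 GB can be addressed directly.  For
 * buffers that reach beyond this limit (or, as with struct idal_buffer
 * below, are assembled from non-contiguous chunks), the CCW instead points
 * to a list of indirect data address words (IDAWs), each addressing one
 * IDA_BLOCK_SIZE chunk of data.  The helpers in this file build and manage
 * such indirect data address lists (IDALs).
 */
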
/*
 * Test if an address/length pair needs an idal list.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) + length - 1) >> 31) != 0;
}


/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
		(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
}

/*
 * Create the list of idal words for an address/length pair.
 */
static inline unsigned long *idal_create_words(unsigned long *idaws,
					       void *vaddr, unsigned int length)
{
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
	return idaws;
}

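/*
 * Layout note: idal_create_words() stores the unaligned physical start
 * address in the first IDAW, so the byte offset within the first IDA block
 * is taken from there; all following IDAWs are IDA_BLOCK_SIZE aligned.
 * The return value points past the last word written, which allows several
 * buffers to be chained into one list.
 */
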
/*
 * Set the address of the data in the CCW.
 * If necessary, an IDAL is allocated and the appropriate flags are set.
 */
static inline int
set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
	unsigned int nridaws;
	unsigned long *idal;

	if (ccw->flags & CCW_FLAG_IDA)
		return -EINVAL;
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA);
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		vaddr = idal;
	}
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}

/*
 * Release any IDAL that was allocated for the CCW.
 */
static inline void
clear_normalized_cda(struct ccw1 *ccw)
{
	if (ccw->flags & CCW_FLAG_IDA) {
		kfree((void *)(unsigned long) ccw->cda);
		ccw->flags &= ~CCW_FLAG_IDA;
	}
	ccw->cda = 0;
}

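/*
 * Example (sketch only; the surrounding driver objects and names are
 * hypothetical): a driver fills in command code and count, lets
 * set_normalized_cda() decide whether an IDAL is needed, and releases
 * the IDAL after the channel program has completed.
 *
 *	ccw->cmd_code = cmd;
 *	ccw->flags = 0;
 *	ccw->count = size;
 *	rc = set_normalized_cda(ccw, buffer);
 *	if (rc)
 *		return rc;
 *	... start the channel program and wait for completion ...
 *	clear_normalized_cda(ccw);
 */
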
/*
 * Idal buffer: a buffer assembled from IDA_BLOCK_SIZE chunks.  The data[]
 * pointer array is used directly as the list of IDAWs when indirect
 * addressing is required (see idal_buffer_set_cda()).
 */
struct idal_buffer {
	size_t size;		/* total usable size in bytes */
	size_t page_order;	/* order of each page allocation */
	void *data[];		/* one pointer per IDA_BLOCK_SIZE block */
};

/*
 * Allocate an idal buffer.
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
	struct idal_buffer *ib;
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
	ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
	if (ib == NULL)
		return ERR_PTR(-ENOMEM);
	ib->size = size;
	ib->page_order = page_order;
	for (i = 0; i < nr_ptrs; i++) {
		if ((i & (nr_chunks - 1)) != 0) {
			ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
			continue;
		}
		ib->data[i] = (void *)
			__get_free_pages(GFP_KERNEL, page_order);
		if (ib->data[i] != NULL)
			continue;
		/* not enough memory */
		while (i >= nr_chunks) {
			i -= nr_chunks;
			free_pages((unsigned long) ib->data[i],
				   ib->page_order);
		}
		kfree(ib);
		return ERR_PTR(-ENOMEM);
	}
	return ib;
}

/*
 * Free an idal buffer.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
	for (i = 0; i < nr_ptrs; i += nr_chunks)
		free_pages((unsigned long) ib->data[i], ib->page_order);
	kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
	return ib->size > (4096ul << ib->page_order) ||
		idal_is_needed(ib->data[0], ib->size);
}

/*
 * Set the channel data address of a CCW to an idal buffer.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
	if (__idal_buffer_is_needed(ib)) {
		/* setup idals */
		ccw->cda = (u32)(addr_t) ib->data;
		ccw->flags |= CCW_FLAG_IDA;
	} else
		/* we do not need idals - use direct addressing */
		ccw->cda = (u32)(addr_t) ib->data[0];
	ccw->count = ib->size;
}

/*
 * Copy count bytes from an idal buffer to user memory
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		to = (void __user *) to + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_to_user(to, ib->data[i], count);
}

/*
 * Copy count bytes from user memory to an idal buffer
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		from = (void __user *) from + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_from_user(ib->data[i], from, count);
}

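/*
 * Example (sketch only; sizes, variable names and the surrounding driver
 * context are hypothetical): allocate an idal buffer backed by order-1
 * (8 KB) page allocations, hook it up to a CCW, copy the result to user
 * space and free it again.
 *
 *	struct idal_buffer *ib;
 *
 *	ib = idal_buffer_alloc(size, 1);
 *	if (IS_ERR(ib))
 *		return PTR_ERR(ib);
 *	idal_buffer_set_cda(ib, ccw);
 *	... start the channel program and wait for completion ...
 *	if (idal_buffer_to_user(ib, ubuf, size))
 *		rc = -EFAULT;
 *	idal_buffer_free(ib);
 */
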
#endif