xref: /OK3568_Linux_fs/kernel/drivers/parisc/iommu-helpers.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: IOMMU-specific callback that writes a single
 *	I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack.  For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/

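		/*
		 * iommu_coalesce_chunks() marks the head of each coalesced
		 * stream by storing PIDE_FLAG | (pdir index << IOVP_SHIFT) |
		 * byte offset in sg_dma_address(); decode that here to find
		 * the stream's first I/O Pdir entry.
		 */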
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
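		/* Write one I/O Pdir entry per IO page spanned by this
		 * chunk; dma_offset widens the first chunk of a stream. */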
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}
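
/*
 * Illustrative sketch only: the general shape of an iommu_io_pdir_entry()
 * callback passed to iommu_fill_pdir().  The real implementations live in
 * the bus drivers (e.g. sba_io_pdir_entry() in sba_iommu.c); the entry
 * layout below is hypothetical and not any particular IOMMU's format.
 *
 *	static void example_io_pdir_entry(u64 *pdir_ptr, space_t sid,
 *					  unsigned long vba, unsigned long hint)
 *	{
 *		u64 pa = virt_to_phys((void *)vba);
 *
 *		*pdir_ptr = pa | hint;		hypothetical entry layout
 *	}
 */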


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. A future enhancement could make one pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;

	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);
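	/*
	 * Example: a segment boundary mask of ULONG_MAX ("no boundary")
	 * makes the addition above wrap to 0, so the min() is skipped and
	 * max_seg_size alone limits how far chunks may coalesce.
	 */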

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			** Next see if we can append the next chunk: it must
			** start at the exact address where the previous
			** entry ended, and that join must fall on a page
			** boundary.
			*/
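			/*
			 * Example: a chunk ending at 0x1000 joins one starting
			 * at 0x1000 (a page-aligned join); any gap, overlap,
			 * or join mid-page starts a new DMA stream instead.
			 */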
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
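
/*
 * Usage sketch (an assumption drawn from the callers in this directory,
 * e.g. the map_sg paths of ccio-dma.c and sba_iommu.c; locking and error
 * handling are the callers' business and are elided here).  The two
 * helpers form a two-pass map_sg: first coalesce chunks and allocate pdir
 * space, then fill in the pdir entries.
 *
 *	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
 *					  xxx_alloc_range);
 *	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
 *				 xxx_io_pdir_entry);
 *	BUG_ON(coalesced != filled);
 *
 * xxx_alloc_range() and xxx_io_pdir_entry() stand for the IOMMU-specific
 * callbacks; "xxx" is a placeholder, not a real symbol.
 */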