// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co.Ltd
 * Author: Felix Zeng <felix.zeng@rock-chips.com>
 */

#include "rknpu_iommu.h"

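/**
 * rknpu_iommu_dma_alloc_iova() - allocate a range of IOVA space
 * @domain: IOMMU domain holding the iommu-dma IOVA cookie
 * @size: size of the allocation, in bytes
 * @dma_limit: highest DMA address usable by @dev
 * @dev: device the allocation is made for
 *
 * Mirrors the core iommu-dma IOVA allocator across the kernel versions
 * this driver supports. Returns the allocated IOVA, or 0 on failure.
 */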
dma_addr_t rknpu_iommu_dma_alloc_iova(struct iommu_domain *domain, size_t size,
				      u64 dma_limit, struct device *dev)
{
	struct rknpu_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;
#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
	dma_addr_t limit;
#endif

	shift = iova_shift(iovad);
	iova_len = size >> shift;

#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);
#endif

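	/*
	 * dev->bus_dma_mask was renamed to dev->bus_dma_limit in v5.5;
	 * the v5.10 cutoff used here is safely past the rename.
	 */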
#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
#else
	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;
#endif

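	/* Never allocate past the end of the domain's usable aperture. */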
	if (domain->geometry.force_aperture)
		dma_limit =
			min_t(u64, dma_limit, domain->geometry.aperture_end);

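	/*
	 * Pre-v5.4 kernels expect the caller to clamp the PFN limit to the
	 * IOVA domain's end_pfn before calling alloc_iova_fast().
	 */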
#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
#else
	limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn);

	iova = alloc_iova_fast(iovad, iova_len, limit, true);
#endif

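	/*
	 * alloc_iova_fast() returns 0 on failure, so a zero dma_addr_t
	 * signals allocation failure to the caller.
	 */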
	return (dma_addr_t)iova << shift;
}
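
/*
 * Minimal usage sketch (hypothetical caller, not part of this driver),
 * assuming a kernel where iommu_map() takes no gfp argument: the
 * allocated IOVA is typically handed to iommu_map() and released with
 * rknpu_iommu_dma_free_iova() using the same size.
 *
 *	dma_addr_t iova;
 *
 *	iova = rknpu_iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 *	if (!iova)
 *		return -ENOMEM;
 *	if (iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE)) {
 *		rknpu_iommu_dma_free_iova((void *)domain->iova_cookie, iova, size);
 *		return -ENOMEM;
 *	}
 */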
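/**
 * rknpu_iommu_dma_free_iova() - release IOVA space back to the allocator
 * @cookie: iommu-dma cookie containing the IOVA domain
 * @iova: start address returned by rknpu_iommu_dma_alloc_iova()
 * @size: size of the original allocation, in bytes
 */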
void rknpu_iommu_dma_free_iova(struct rknpu_iommu_dma_cookie *cookie,
			       dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
}