xref: /OK3568_Linux_fs/kernel/arch/arm64/mm/dma-mapping.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <xen/xen.h>
#include <xen/swiotlb-xen.h>
#include <trace/hooks/iommu.h>
#include <trace/hooks/dma_noalias.h>

#include <asm/cacheflush.h>
/*
 * arch_sync_dma_for_device - CPU cache maintenance before a DMA transfer
 * @paddr: physical start of the buffer (must lie in the kernel linear map,
 *         since it is translated with phys_to_virt() below)
 * @size:  buffer length in bytes
 * @dir:   DMA transfer direction
 *
 * Delegates to the arch helper __dma_map_area(), which performs the
 * direction-dependent clean/invalidate so a non-coherent device observes
 * data the CPU has written.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_map_area(phys_to_virt(paddr), size, dir);
}
23*4882a593Smuzhiyun 
/*
 * arch_sync_dma_for_cpu - CPU cache maintenance after a DMA transfer
 * @paddr: physical start of the buffer (must lie in the kernel linear map,
 *         since it is translated with phys_to_virt() below)
 * @size:  buffer length in bytes
 * @dir:   DMA transfer direction
 *
 * Counterpart of arch_sync_dma_for_device(): delegates to __dma_unmap_area()
 * so the CPU sees data the device has written (typically an invalidate for
 * DMA_FROM_DEVICE; exact behaviour lives in the asm helper).
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
29*4882a593Smuzhiyun 
/*
 * arch_dma_prep_coherent - prepare pages for use as a coherent DMA buffer
 * @page: first page of the region (must be a linear-map page, as
 *        page_address() is used to get its virtual address)
 * @size: region length in bytes
 *
 * Flushes (clean + invalidate) the region so no stale dirty cache lines can
 * later be evicted on top of device-written data once the buffer is mapped
 * non-cacheable.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__dma_flush_area(page_address(page), size);
}
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #ifdef CONFIG_IOMMU_DMA
arch_teardown_dma_ops(struct device * dev)36*4882a593Smuzhiyun void arch_teardown_dma_ops(struct device *dev)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	dev->dma_ops = NULL;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun #endif
41*4882a593Smuzhiyun 
/*
 * arch_setup_dma_ops - select the DMA ops for @dev at probe/attach time
 * @dev:      device being configured
 * @dma_base: base of the device's usable DMA/IOVA window
 * @size:     size of that window
 * @iommu:    non-NULL when the device sits behind an IOMMU
 * @coherent: whether the device is cache-coherent with the CPUs
 *
 * Called from the bus code (OF/ACPI) once the device's coherency and IOMMU
 * topology are known.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	int cls = cache_line_size_of_cpu();

	/*
	 * A non-coherent device needs DMA buffers aligned to the largest
	 * cache writeback granule (CTR_EL0.CWG). If the hardware granule
	 * exceeds ARCH_DMA_MINALIGN, kmalloc'd buffers may share cache
	 * lines with unrelated data -- warn and taint rather than fail.
	 */
	WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, cls);

	dev->dma_coherent = coherent;
	if (iommu) {
		iommu_setup_dma_ops(dev, dma_base, size);
		/* Android vendor hooks; must stay after iommu_setup_dma_ops() */
		trace_android_vh_iommu_setup_dma_ops(dev, dma_base, size);
		trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size);
	}

	/* Allow vendor modules to opt-in for the 2454944 erratum workaround */
	trace_android_rvh_setup_dma_ops(dev);

#ifdef CONFIG_XEN
	/*
	 * In the Xen initial domain, swiotlb-xen ops must override whatever
	 * was chosen above -- keep this last.
	 */
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
}
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun #ifdef CONFIG_NO_GKI
70*4882a593Smuzhiyun EXPORT_SYMBOL(__dma_map_area);
71*4882a593Smuzhiyun EXPORT_SYMBOL(__dma_unmap_area);
72*4882a593Smuzhiyun #endif
73