xref: /OK3568_Linux_fs/kernel/arch/arc/mm/dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely, which
	 * will be optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}
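
/*
 * Illustrative only (not part of the original file): a minimal sketch of a
 * driver-side coherent allocation that exercises the path above.  On this
 * non-IOC path dma_alloc_coherent() hands back an uncached mapping, and
 * arch_dma_prep_coherent() has already written back and invalidated any
 * stale cached lines for the backing page.  The function names and the
 * descriptor-ring use case are hypothetical; dma_alloc_coherent() and
 * dma_free_coherent() are the standard <linux/dma-mapping.h> API.
 */
#if 0	/* example sketch, assuming <linux/dma-mapping.h> is available */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
				dma_addr_t *ring_dma)
{
	/* GFP_KERNEL: may sleep; use GFP_ATOMIC from atomic context */
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t ring_size,
			      void *ring, dma_addr_t ring_dma)
{
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}
#endif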

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}
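
/*
 * Illustrative only (not part of the original file): a hypothetical TX path
 * showing the "map == for_device" column of the table above.  The CPU fills
 * a kernel buffer, then dma_map_single(..., DMA_TO_DEVICE) ends up in
 * arch_sync_dma_for_device(), i.e. a writeback of the lines covering the
 * buffer, so the device reads up-to-date data from memory.  example_tx()
 * and the device handoff are made up; dma_map_single(), dma_mapping_error()
 * and dma_unmap_single() are the standard streaming DMA API.
 */
#if 0	/* example sketch, assuming <linux/dma-mapping.h> is available */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the hardware and wait for TX completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);	/* "none" at unmap */
	return 0;
}
#endif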

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
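
/*
 * Illustrative only (not part of the original file): a hypothetical RX path
 * for the FROM_DEV row of the table above.  The buffer is invalidated at map
 * time (for_device) and again at unmap time (for_cpu); the second invalidate
 * discards any lines the CPU speculatively prefetched while the device was
 * still writing.  example_rx() is made up; the calls are the standard
 * streaming DMA API.
 */
#if 0	/* example sketch, assuming <linux/dma-mapping.h> is available */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs received data into 'buf' ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	/* the CPU may now read fresh data from 'buf' */
	return 0;
}
#endif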

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
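
/*
 * Illustrative only (not part of the original file): arch_setup_dma_ops() is
 * called during device/firmware configuration with coherent=true when the
 * device (or its bus) carries the "dma-coherent" DT property.  A driver that
 * wants to branch on the outcome can use dev_is_dma_coherent() from
 * <linux/dma-map-ops.h>, as in this hypothetical probe-time helper.
 */
#if 0	/* example sketch; example_report_coherency() is made up */
static void example_report_coherency(struct device *dev)
{
	if (dev_is_dma_coherent(dev))
		dev_dbg(dev, "cache coherent DMA, no manual syncs needed\n");
	else
		dev_dbg(dev, "non-coherent DMA, relying on arch sync hooks\n");
}
#endif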