/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H

/*
** We need to support 4 different coherent dma models with one binary:
**
**     I/O MMU        consistent method           dma_sync behavior
**  =============   ======================       =======================
**  a) PA-7x00LC    uncachable host memory          flush/purge
**  b) U2/Uturn      cachable host memory              NOP
**  c) Ike/Astro     cachable host memory              NOP
**  d) EPIC/SAGA     memory on EPIC/SAGA         flush/reset DMA channel
**
** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
**
** Systems (eg PCX-T workstations) that don't fall into the above
** categories will need to modify the needed drivers to perform
** flush/purge and allocate "regular" cacheable pages for everything.
*/

/* Arch-wide DMA ops table, selected at boot based on the detected
 * I/O MMU / bus hardware (see the models listed above); defined in
 * the parisc platform code. */
extern const struct dma_map_ops *hppa_dma_ops;

/*
 * get_arch_dma_ops - return the DMA mapping operations for a bus.
 * @bus: bus to query (unused; all parisc buses share one ops table).
 *
 * Hook used by the generic DMA mapping layer to obtain the
 * architecture's dma_map_ops.  Returns the single arch-wide table.
 */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return hppa_dma_ops;
}

#endif