xref: /OK3568_Linux_fs/kernel/arch/sh/include/cpu-sh4/cpu/sq.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun  *
3*4882a593Smuzhiyun  * include/asm-sh/cpu-sh4/sq.h
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2001, 2002, 2003  Paul Mundt
6*4882a593Smuzhiyun  * Copyright (C) 2001, 2002  M. R. Brown
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun #ifndef __ASM_CPU_SH4_SQ_H
9*4882a593Smuzhiyun #define __ASM_CPU_SH4_SQ_H
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <asm/addrspace.h>
12*4882a593Smuzhiyun #include <asm/page.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun /*
15*4882a593Smuzhiyun  * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
16*4882a593Smuzhiyun  * mapped to any physical address space. Since data is written (and aligned)
17*4882a593Smuzhiyun  * to 32-byte boundaries, we need to be sure that all allocations are aligned.
18*4882a593Smuzhiyun  */
/*
 * SQ_SIZE: store queue burst/line size in bytes — data is written out in
 * 32-byte units (see the block comment above), so all SQ allocations must
 * be 32-byte aligned.
 * SQ_ALIGN_MASK: clears the low log2(SQ_SIZE) address bits; only valid
 * because SQ_SIZE is a power of two.
 * SQ_ALIGN(addr): round addr *up* to the next SQ_SIZE boundary (addr is
 * unchanged when already aligned).
 */
19*4882a593Smuzhiyun #define SQ_SIZE                 32
20*4882a593Smuzhiyun #define SQ_ALIGN_MASK           (~(SQ_SIZE - 1))
21*4882a593Smuzhiyun #define SQ_ALIGN(addr)          (((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK)
22*4882a593Smuzhiyun 
/*
 * SQ_QACR0/SQ_QACR1: addresses of the queue address control registers in
 * the P4 control-register area (offsets 0x38/0x3c from P4SEG_REG_BASE).
 * NOTE(review): presumably these select the physical area bits used when
 * SQ0/SQ1 contents are flushed — confirm against the SH-4 hardware manual.
 * SQ_ADDRMAX: one past the last store queue address; the window is 64MB
 * (0x04000000) above P4SEG_STORE_QUE, matching the e0000000-e3fffffc
 * range described in the comment above.
 */
23*4882a593Smuzhiyun #define SQ_QACR0		(P4SEG_REG_BASE  + 0x38)
24*4882a593Smuzhiyun #define SQ_QACR1		(P4SEG_REG_BASE  + 0x3c)
25*4882a593Smuzhiyun #define SQ_ADDRMAX              (P4SEG_STORE_QUE + 0x04000000)
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun /* arch/sh/kernel/cpu/sh4/sq.c */
/*
 * Store queue mapping API; implementations live in sq.c (not visible here).
 *
 * sq_remap()       - map @size bytes of physical memory at @phys through the
 *                    store queues with protection @prot; @name labels the
 *                    mapping. Returns the SQ virtual address of the mapping.
 *                    NOTE(review): failure convention (0 vs. something else)
 *                    is not visible from this header — confirm in sq.c
 *                    before relying on it.
 * sq_unmap()       - tear down a mapping previously returned by sq_remap(),
 *                    identified by its SQ virtual address @vaddr.
 * sq_flush_range() - flush @len bytes of queued data starting at SQ address
 *                    @start out to memory.
 */
28*4882a593Smuzhiyun unsigned long sq_remap(unsigned long phys, unsigned int size,
29*4882a593Smuzhiyun 		       const char *name, pgprot_t prot);
30*4882a593Smuzhiyun void sq_unmap(unsigned long vaddr);
31*4882a593Smuzhiyun void sq_flush_range(unsigned long start, unsigned int len);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #endif /* __ASM_CPU_SH4_SQ_H */
34