xref: /OK3568_Linux_fs/kernel/drivers/media/pci/cx18/cx18-io.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  cx18 driver PCI memory mapped IO access routines
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
6*4882a593Smuzhiyun  *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #ifndef CX18_IO_H
10*4882a593Smuzhiyun #define CX18_IO_H
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "cx18-driver.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun /*
15*4882a593Smuzhiyun  * Readback and retry of MMIO access for reliability:
16*4882a593Smuzhiyun  * The concept was suggested by Steve Toth <stoth@linuxtv.org>.
17*4882a593Smuzhiyun  * The implementation is the fault of Andy Walls <awalls@md.metrocast.net>.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * *write* functions are implied to retry the mmio unless suffixed with _noretry
20*4882a593Smuzhiyun  * *read* functions never retry the mmio (it never helps to do so)
21*4882a593Smuzhiyun  */
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /* Non byteswapping memory mapped IO */
cx18_raw_readl(struct cx18 * cx,const void __iomem * addr)24*4882a593Smuzhiyun static inline u32 cx18_raw_readl(struct cx18 *cx, const void __iomem *addr)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun 	return __raw_readl(addr);
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun static inline
cx18_raw_writel_noretry(struct cx18 * cx,u32 val,void __iomem * addr)30*4882a593Smuzhiyun void cx18_raw_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	__raw_writel(val, addr);
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun 
/*
 * Non-byteswapping MMIO write with readback verification: rewrite until the
 * read-back value matches, up to CX18_MAX_MMIO_WR_RETRIES attempts.
 */
static inline void cx18_raw_writel(struct cx18 *cx, u32 val, void __iomem *addr)
{
	int attempt = 0;

	while (attempt++ < CX18_MAX_MMIO_WR_RETRIES) {
		cx18_raw_writel_noretry(cx, val, addr);
		if (cx18_raw_readl(cx, addr) == val)
			break;
	}
}
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /* Normal memory mapped IO */
cx18_readl(struct cx18 * cx,const void __iomem * addr)46*4882a593Smuzhiyun static inline u32 cx18_readl(struct cx18 *cx, const void __iomem *addr)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	return readl(addr);
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun static inline
cx18_writel_noretry(struct cx18 * cx,u32 val,void __iomem * addr)52*4882a593Smuzhiyun void cx18_writel_noretry(struct cx18 *cx, u32 val, void __iomem *addr)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun 	writel(val, addr);
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * MMIO write with readback verification: repeat the write until the register
 * reads back the written value, bounded by CX18_MAX_MMIO_WR_RETRIES.
 */
static inline void cx18_writel(struct cx18 *cx, u32 val, void __iomem *addr)
{
	int attempt = 0;

	while (attempt++ < CX18_MAX_MMIO_WR_RETRIES) {
		cx18_writel_noretry(cx, val, addr);
		if (cx18_readl(cx, addr) == val)
			break;
	}
}
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun static inline
cx18_writel_expect(struct cx18 * cx,u32 val,void __iomem * addr,u32 eval,u32 mask)68*4882a593Smuzhiyun void cx18_writel_expect(struct cx18 *cx, u32 val, void __iomem *addr,
69*4882a593Smuzhiyun 			u32 eval, u32 mask)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun 	int i;
72*4882a593Smuzhiyun 	u32 r;
73*4882a593Smuzhiyun 	eval &= mask;
74*4882a593Smuzhiyun 	for (i = 0; i < CX18_MAX_MMIO_WR_RETRIES; i++) {
75*4882a593Smuzhiyun 		cx18_writel_noretry(cx, val, addr);
76*4882a593Smuzhiyun 		r = cx18_readl(cx, addr);
77*4882a593Smuzhiyun 		if (r == 0xffffffff && eval != 0xffffffff)
78*4882a593Smuzhiyun 			continue;
79*4882a593Smuzhiyun 		if (eval == (r & mask))
80*4882a593Smuzhiyun 			break;
81*4882a593Smuzhiyun 	}
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun 
cx18_readw(struct cx18 * cx,const void __iomem * addr)84*4882a593Smuzhiyun static inline u16 cx18_readw(struct cx18 *cx, const void __iomem *addr)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	return readw(addr);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun static inline
cx18_writew_noretry(struct cx18 * cx,u16 val,void __iomem * addr)90*4882a593Smuzhiyun void cx18_writew_noretry(struct cx18 *cx, u16 val, void __iomem *addr)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	writew(val, addr);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
/* 16-bit MMIO write with readback verification and bounded retries. */
static inline void cx18_writew(struct cx18 *cx, u16 val, void __iomem *addr)
{
	int attempt = 0;

	while (attempt++ < CX18_MAX_MMIO_WR_RETRIES) {
		cx18_writew_noretry(cx, val, addr);
		if (cx18_readw(cx, addr) == val)
			break;
	}
}
104*4882a593Smuzhiyun 
cx18_readb(struct cx18 * cx,const void __iomem * addr)105*4882a593Smuzhiyun static inline u8 cx18_readb(struct cx18 *cx, const void __iomem *addr)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	return readb(addr);
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun static inline
cx18_writeb_noretry(struct cx18 * cx,u8 val,void __iomem * addr)111*4882a593Smuzhiyun void cx18_writeb_noretry(struct cx18 *cx, u8 val, void __iomem *addr)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun 	writeb(val, addr);
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
/* Byte-wide MMIO write with readback verification and bounded retries. */
static inline void cx18_writeb(struct cx18 *cx, u8 val, void __iomem *addr)
{
	int attempt = 0;

	while (attempt++ < CX18_MAX_MMIO_WR_RETRIES) {
		cx18_writeb_noretry(cx, val, addr);
		if (cx18_readb(cx, addr) == val)
			break;
	}
}
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun static inline
cx18_memcpy_fromio(struct cx18 * cx,void * to,const void __iomem * from,unsigned int len)127*4882a593Smuzhiyun void cx18_memcpy_fromio(struct cx18 *cx, void *to,
128*4882a593Smuzhiyun 			const void __iomem *from, unsigned int len)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	memcpy_fromio(to, from, len);
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count);
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /* Access "register" region of CX23418 memory mapped I/O */
/* Write a CX23418 register (offset into the register aperture), no retry. */
static inline void cx18_write_reg_noretry(struct cx18 *cx, u32 val, u32 reg)
{
	cx18_writel_noretry(cx, val, cx->reg_mem + reg);
}
141*4882a593Smuzhiyun 
/* Write a CX23418 register with readback verification and retries. */
static inline void cx18_write_reg(struct cx18 *cx, u32 val, u32 reg)
{
	cx18_writel(cx, val, cx->reg_mem + reg);
}
146*4882a593Smuzhiyun 
/* Write a CX23418 register, verifying the masked readback against eval. */
static inline void cx18_write_reg_expect(struct cx18 *cx, u32 val, u32 reg,
					 u32 eval, u32 mask)
{
	cx18_writel_expect(cx, val, cx->reg_mem + reg, eval, mask);
}
152*4882a593Smuzhiyun 
/* Read a CX23418 register (offset into the register aperture). */
static inline u32 cx18_read_reg(struct cx18 *cx, u32 reg)
{
	return cx18_readl(cx, cx->reg_mem + reg);
}
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun /* Access "encoder memory" region of CX23418 memory mapped I/O */
/* Write a word into the encoder-memory aperture, with readback retries. */
static inline void cx18_write_enc(struct cx18 *cx, u32 val, u32 addr)
{
	cx18_writel(cx, val, cx->enc_mem + addr);
}
164*4882a593Smuzhiyun 
/* Read a word from the encoder-memory aperture. */
static inline u32 cx18_read_enc(struct cx18 *cx, u32 addr)
{
	return cx18_readl(cx, cx->enc_mem + addr);
}
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun void cx18_sw1_irq_enable(struct cx18 *cx, u32 val);
171*4882a593Smuzhiyun void cx18_sw1_irq_disable(struct cx18 *cx, u32 val);
172*4882a593Smuzhiyun void cx18_sw2_irq_enable(struct cx18 *cx, u32 val);
173*4882a593Smuzhiyun void cx18_sw2_irq_disable(struct cx18 *cx, u32 val);
174*4882a593Smuzhiyun void cx18_sw2_irq_disable_cpu(struct cx18 *cx, u32 val);
175*4882a593Smuzhiyun void cx18_setup_page(struct cx18 *cx, u32 addr);
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun #endif /* CX18_IO_H */
178