/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Terrence Xu <terrence.xu@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

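/*
 * GMBUS1 field layout, as decoded by the helpers below: the guest packs the
 * 7-bit slave address (bits 1:7), an optional index byte (bits 8:15), the
 * total byte count (bits 16:24) and the bus cycle select (bits 25:27) into a
 * single GMBUS1 write.
 */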
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
	GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)

/* GMBUS0 bits definitions */
#define _GMBUS_PIN_SEL_MASK (0x7)

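/*
 * Fetch the next byte of the virtual monitor's EDID block and advance the
 * read pointer. Any access outside a properly initiated transfer (wrong
 * state, no slave selected, no EDID available, or a read past EDID_SIZE) is
 * logged and returns 0.
 */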
static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
	unsigned char chr = 0;

	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
		gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
		return 0;
	}
	if (edid->current_edid_read >= EDID_SIZE) {
		gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
		return 0;
	}

	if (!edid->edid_available) {
		gvt_vgpu_err("Reading EDID but EDID is not available!\n");
		return 0;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
		struct intel_vgpu_edid_data *edid_data =
			intel_vgpu_port(vgpu, edid->port)->edid;

		chr = edid_data->edid_block[edid->current_edid_read];
		edid->current_edid_read++;
	} else {
		gvt_vgpu_err("No EDID available during the reading?\n");
	}
	return chr;
}

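/*
 * The low bits of GMBUS0 select the DDC pin pair used for the transfer.
 * Map that pin select to the vGPU port carrying the virtual monitor; the
 * mapping differs between Broxton, CNP-based parts (CFL/CML) and the other
 * supported platforms, hence the three helpers below.
 */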
static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
{
	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
	int port = -EINVAL;

	if (port_select == GMBUS_PIN_1_BXT)
		port = PORT_B;
	else if (port_select == GMBUS_PIN_2_BXT)
		port = PORT_C;
	else if (port_select == GMBUS_PIN_3_BXT)
		port = PORT_D;
	else if (port_select == GMBUS_PIN_4_CNP)
		port = PORT_E;
	return port;
}

static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
{
	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
	int port = -EINVAL;

	if (port_select == GMBUS_PIN_1_BXT)
		port = PORT_B;
	else if (port_select == GMBUS_PIN_2_BXT)
		port = PORT_C;
	else if (port_select == GMBUS_PIN_3_BXT)
		port = PORT_D;
	return port;
}

static inline int get_port_from_gmbus0(u32 gmbus0)
{
	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
	int port = -EINVAL;

	if (port_select == GMBUS_PIN_VGADDC)
		port = PORT_E;
	else if (port_select == GMBUS_PIN_DPC)
		port = PORT_C;
	else if (port_select == GMBUS_PIN_DPB)
		port = PORT_B;
	else if (port_select == GMBUS_PIN_DPD)
		port = PORT_D;
	return port;
}

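/*
 * Bring the virtual GMBUS controller back to its idle state: report the
 * hardware as ready, set the GMBUS_SATOER error bit if no EDID is
 * available, and return the emulated phase machine to GMBUS_IDLE_PHASE.
 */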
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
	vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
	if (!vgpu->display.i2c_edid.edid_available)
		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}

/* GMBUS0 */
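/*
 * A GMBUS0 write (re)starts a transfer: the I2C EDID state machine is
 * reset, the pin select field is resolved to a port, and GMBUS2 is updated
 * to advertise either an available EDID or a GMBUS_SATOER error when no
 * non-DP monitor is present on that port.
 */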
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
			unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	int port, pin_select;

	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);

	pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;

	intel_vgpu_init_i2c_edid(vgpu);

	if (pin_select == 0)
		return 0;

	if (IS_BROXTON(i915))
		port = bxt_get_port_from_gmbus0(pin_select);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		port = cnp_get_port_from_gmbus0(pin_select);
	else
		port = get_port_from_gmbus0(pin_select);
	if (drm_WARN_ON(&i915->drm, port < 0))
		return 0;

	vgpu->display.i2c_edid.state = I2C_GMBUS;
	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;

	vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
	vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;

	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
			!intel_vgpu_port_is_dp(vgpu, port)) {
		vgpu->display.i2c_edid.port = port;
		vgpu->display.i2c_edid.edid_available = true;
		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
	} else
		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
	return 0;
}

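/*
 * A GMBUS1 write carries the transfer parameters: total byte count, slave
 * address, optional index and the bus cycle to run. Only the EDID slave
 * address is honoured; the cycle field drives the emulated phase machine
 * (start/stop/data) and the GMBUS_ACTIVE bit in GMBUS2.
 */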
static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			void *p_data, unsigned int bytes)
{
	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
	u32 slave_addr;
	u32 wvalue = *(u32 *)p_data;

	if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
		if (!(wvalue & GMBUS_SW_CLR_INT)) {
			vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
			reset_gmbus_controller(vgpu);
		}
		/*
		 * TODO: "This bit is cleared to zero when an event
		 * causes the HW_RDY bit transition to occur"
		 */
	} else {
		/*
		 * Per bspec, setting this bit can cause:
		 * 1) INT status bit cleared
		 * 2) HW_RDY bit asserted
		 */
		if (wvalue & GMBUS_SW_CLR_INT) {
			vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
			vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
		}

		/* For virtualization, we assume that HW is always ready,
		 * so GMBUS_SW_RDY should always be cleared
		 */
		if (wvalue & GMBUS_SW_RDY)
			wvalue &= ~GMBUS_SW_RDY;

		i2c_edid->gmbus.total_byte_count =
			gmbus1_total_byte_count(wvalue);
		slave_addr = gmbus1_slave_addr(wvalue);

		/* vgpu gmbus only supports EDID */
		if (slave_addr == EDID_ADDR) {
			i2c_edid->slave_selected = true;
		} else if (slave_addr != 0) {
			gvt_dbg_dpy(
				"vgpu%d: unsupported gmbus slave addr(0x%x)\n"
				" gmbus operations will be ignored.\n",
					vgpu->id, slave_addr);
		}

		if (wvalue & GMBUS_CYCLE_INDEX)
			i2c_edid->current_edid_read =
				gmbus1_slave_index(wvalue);

		i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
		switch (gmbus1_bus_cycle(wvalue)) {
		case GMBUS_NOCYCLE:
			break;
		case GMBUS_STOP:
			/* From spec:
			 * This can only cause a STOP to be generated
			 * if a GMBUS cycle is generated, the GMBUS is
			 * currently in a data/wait/idle phase, or it is in a
			 * WAIT phase
			 */
			if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
				!= GMBUS_NOCYCLE) {
				intel_vgpu_init_i2c_edid(vgpu);
				/* After the 'stop' cycle, hw state would become
				 * 'stop phase' and then 'idle phase' after a
				 * few milliseconds. In emulation, we just set
				 * it as 'idle phase' ('stop phase' is not
				 * visible in gmbus interface)
				 */
				i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
				vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
			}
			break;
		case NIDX_NS_W:
		case IDX_NS_W:
		case NIDX_STOP:
		case IDX_STOP:
			/* From hw spec, the GMBUS phase
			 * transitions like this:
			 * START (-->INDEX) -->DATA
			 */
			i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
			vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
			break;
		default:
			gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
			break;
		}
		/*
		 * From hw spec, the WAIT state will be
		 * cleared:
		 * (1) in a new GMBUS cycle
		 * (2) by generating a stop
		 */
		vgpu_vreg(vgpu, offset) = wvalue;
	}
	return 0;
}

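/*
 * GMBUS3 is the data register. Guest-initiated sends through GMBUS3 are not
 * part of the supported EDID-read flow, so a trapped write is unexpected and
 * only triggers a WARN.
 */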
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	drm_WARN_ON(&i915->drm, 1);
	return 0;
}

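/*
 * A GMBUS3 read returns the next chunk of the EDID transfer: up to four
 * bytes are fetched and packed little-endian into the 32-bit register.
 * Once the remaining byte count falls within the last chunk, the phase
 * machine moves to idle/wait according to the cycle type and the EDID
 * state is re-initialised.
 */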
static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	int i;
	unsigned char byte_data;
	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
	int byte_left = i2c_edid->gmbus.total_byte_count -
		i2c_edid->current_edid_read;
	int byte_count = byte_left;
	u32 reg_data = 0;

	/* Data can only be received if the previous settings are correct */
	if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
		if (byte_left <= 0) {
			memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
			return 0;
		}

		if (byte_count > 4)
			byte_count = 4;
		for (i = 0; i < byte_count; i++) {
			byte_data = edid_get_byte(vgpu);
			reg_data |= (byte_data << (i << 3));
		}

		memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);

		if (byte_left <= 4) {
			switch (i2c_edid->gmbus.cycle_type) {
			case NIDX_STOP:
			case IDX_STOP:
				i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
				break;
			case NIDX_NS_W:
			case IDX_NS_W:
			default:
				i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
				break;
			}
			intel_vgpu_init_i2c_edid(vgpu);
		}
		/*
		 * Read GMBUS3 during send operation,
		 * return the latest written value
		 */
	} else {
		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
		gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
	}
	return 0;
}

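/*
 * GMBUS2 carries the bus-claim flag: a read sets GMBUS_INUSE (the reader
 * now owns the bus) while a write of 1 to that bit releases it. Every other
 * bit of the register is treated as read-only status.
 */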
static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 value = vgpu_vreg(vgpu, offset);

	if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
		vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
	memcpy(p_data, (void *)&value, bytes);
	return 0;
}

static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 wvalue = *(u32 *)p_data;

	if (wvalue & GMBUS_INUSE)
		vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
	/* All other bits are read-only */
	return 0;
}

/**
 * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
 * @vgpu: a vGPU
 * @offset: reg offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * This function is used to emulate gmbus register mmio read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
		return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
		return gmbus3_mmio_read(vgpu, offset, p_data, bytes);

	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
	return 0;
}

/**
 * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
 * @vgpu: a vGPU
 * @offset: reg offset
 * @p_data: data to be written
 * @bytes: access data length
 *
 * This function is used to emulate gmbus register mmio write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
		return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
		return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
		return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
		return gmbus3_mmio_write(vgpu, offset, p_data, bytes);

	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
	return 0;
}

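/*
 * Each DP AUX channel exposes a small register block: a control register at
 * offset 0x10 within the block, followed by five 32-bit data registers at
 * 0x14..0x24. get_aux_ch_reg() classifies an MMIO offset accordingly.
 */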
enum {
	AUX_CH_CTL = 0,
	AUX_CH_DATA1,
	AUX_CH_DATA2,
	AUX_CH_DATA3,
	AUX_CH_DATA4,
	AUX_CH_DATA5
};

static inline int get_aux_ch_reg(unsigned int offset)
{
	int reg;

	switch (offset & 0xff) {
	case 0x10:
		reg = AUX_CH_CTL;
		break;
	case 0x14:
		reg = AUX_CH_DATA1;
		break;
	case 0x18:
		reg = AUX_CH_DATA2;
		break;
	case 0x1c:
		reg = AUX_CH_DATA3;
		break;
	case 0x20:
		reg = AUX_CH_DATA4;
		break;
	case 0x24:
		reg = AUX_CH_DATA5;
		break;
	default:
		reg = -1;
		break;
	}
	return reg;
}

#define AUX_CTL_MSG_LENGTH(reg) \
	((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
		DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)

/**
 * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
 * @vgpu: a vGPU
 * @port_idx: port index
 * @offset: reg offset
 * @p_data: write ptr
 *
 * This function is used to emulate AUX channel register write
 *
 */
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
				int port_idx,
				unsigned int offset,
				void *p_data)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
	int msg_length, ret_msg_size;
	int msg, addr, ctrl, op;
	u32 value = *(u32 *)p_data;
	int aux_data_for_write = 0;
	int reg = get_aux_ch_reg(offset);

	if (reg != AUX_CH_CTL) {
		vgpu_vreg(vgpu, offset) = value;
		return;
	}

	msg_length = AUX_CTL_MSG_LENGTH(value);
	/* Check the msg in the DATA register. */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	op = ctrl >> 4;
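	/*
	 * As decoded above, the dword in the first data register holds the
	 * AUX request header: the top byte carries the command (bit 0
	 * selects I2C read vs. write, GVT_AUX_I2C_MOT marks
	 * middle-of-transfer) and 'addr' is the I2C slave address of the
	 * request. Only EDID traffic (addr == EDID_ADDR) is emulated here.
	 */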
	if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* This ctl write only clears some status bits */
		return;
	}

	/* Always set the wanted value for VMs. */
	ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
	vgpu_vreg(vgpu, offset) =
		DP_AUX_CH_CTL_DONE |
		((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
		DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);

	if (msg_length == 3) {
		if (!(op & GVT_AUX_I2C_MOT)) {
			/* stop */
			intel_vgpu_init_i2c_edid(vgpu);
		} else {
			/* start or restart */
			i2c_edid->aux_ch.i2c_over_aux_ch = true;
			i2c_edid->aux_ch.aux_ch_mot = true;
			if (addr == 0) {
				/* reset the address */
				intel_vgpu_init_i2c_edid(vgpu);
			} else if (addr == EDID_ADDR) {
				i2c_edid->state = I2C_AUX_CH;
				i2c_edid->port = port_idx;
				i2c_edid->slave_selected = true;
				if (intel_vgpu_has_monitor_on_port(vgpu,
					port_idx) &&
					intel_vgpu_port_is_dp(vgpu, port_idx))
					i2c_edid->edid_available = true;
			}
		}
	} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
		/* TODO
		 * We only support EDID reading from I2C_over_AUX, and
		 * we do not expect the index mode to be used. Right now
		 * the WRITE operation is ignored. It is good enough to
		 * let the gfx driver do EDID access.
		 */
	} else {
		if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
			return;
		if (drm_WARN_ON(&i915->drm, msg_length != 4))
			return;
		if (i2c_edid->edid_available && i2c_edid->slave_selected) {
			unsigned char val = edid_get_byte(vgpu);

			aux_data_for_write = (val << 16);
		} else
			aux_data_for_write = (0xff << 16);
	}
	/* write the return value in AUX_CH_DATA reg which includes:
	 * ACK of I2C_WRITE
	 * returned byte if it is READ
	 */
	aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
	vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}

/**
 * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
 * @vgpu: a vGPU
 *
 * This function is used to initialize the vGPU i2c edid emulation state
 *
 */
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;

	edid->state = I2C_NOT_SPECIFIED;

	edid->port = -1;
	edid->slave_selected = false;
	edid->edid_available = false;
	edid->current_edid_read = 0;

	memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));

	edid->aux_ch.i2c_over_aux_ch = false;
	edid->aux_ch.aux_ch_mot = false;
}